diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile
deleted file mode 100644
index 4234de160..000000000
--- a/.ci/Jenkinsfile
+++ /dev/null
@@ -1,81 +0,0 @@
-def golang = ['1.23', '1.24']
-def golangDefault = "golang:${golang.last()}"
-
-async {
-
- for (version in golang) {
- def go = version
-
- task("test/go${go}") {
- container("golang:${go}") {
- sh 'make test'
- }
- }
-
- task("build/go${go}") {
- container("golang:${go}") {
- for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
- sh """
- make bin/frostfs-${app}
- bin/frostfs-${app} --version
- """
- }
- }
- }
- }
-
- task('test/race') {
- container(golangDefault) {
- sh 'make test GOFLAGS="-count=1 -race"'
- }
- }
-
- task('lint') {
- container(golangDefault) {
- sh 'make lint-install lint'
- }
- }
-
- task('staticcheck') {
- container(golangDefault) {
- sh 'make staticcheck-install staticcheck-run'
- }
- }
-
- task('gopls') {
- container(golangDefault) {
- sh 'make gopls-install gopls-run'
- }
- }
-
- task('gofumpt') {
- container(golangDefault) {
- sh '''
- make fumpt-install
- make fumpt
- git diff --exit-code --quiet
- '''
- }
- }
-
- task('vulncheck') {
- container(golangDefault) {
- sh '''
- go install golang.org/x/vuln/cmd/govulncheck@latest
- govulncheck ./...
- '''
- }
- }
-
- task('pre-commit') {
- dockerfile("""
- FROM ${golangDefault}
- RUN apt update && \
- apt install -y --no-install-recommends pre-commit
- """) {
- withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
- sh 'pre-commit run --color=always --hook-stage=manual --all-files'
- }
- }
- }
-}
diff --git a/.docker/Dockerfile.adm b/.docker/Dockerfile.adm
index 42aeebc48..eeccaab79 100644
--- a/.docker/Dockerfile.adm
+++ b/.docker/Dockerfile.adm
@@ -1,4 +1,4 @@
-FROM golang:1.23 AS builder
+FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.ci b/.docker/Dockerfile.ci
index 9ddd8de59..ef6586a64 100644
--- a/.docker/Dockerfile.ci
+++ b/.docker/Dockerfile.ci
@@ -1,4 +1,4 @@
-FROM golang:1.23
+FROM golang:1.21
WORKDIR /tmp
diff --git a/.docker/Dockerfile.cli b/.docker/Dockerfile.cli
index 16f130056..0dd4cebcf 100644
--- a/.docker/Dockerfile.cli
+++ b/.docker/Dockerfile.cli
@@ -1,4 +1,4 @@
-FROM golang:1.23 AS builder
+FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.ir b/.docker/Dockerfile.ir
index c119f8127..4015df673 100644
--- a/.docker/Dockerfile.ir
+++ b/.docker/Dockerfile.ir
@@ -1,4 +1,4 @@
-FROM golang:1.23 AS builder
+FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.storage b/.docker/Dockerfile.storage
index 854f7adea..ced6ea538 100644
--- a/.docker/Dockerfile.storage
+++ b/.docker/Dockerfile.storage
@@ -1,4 +1,4 @@
-FROM golang:1.23 AS builder
+FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml
index d568b9607..3c74d9434 100644
--- a/.forgejo/workflows/build.yml
+++ b/.forgejo/workflows/build.yml
@@ -1,10 +1,6 @@
name: Build
-on:
- pull_request:
- push:
- branches:
- - master
+on: [pull_request]
jobs:
build:
@@ -12,7 +8,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- go_versions: [ '1.23', '1.24' ]
+ go_versions: [ '1.20', '1.21' ]
steps:
- uses: actions/checkout@v3
diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml
index 190d7764a..6746408aa 100644
--- a/.forgejo/workflows/dco.yml
+++ b/.forgejo/workflows/dco.yml
@@ -13,9 +13,9 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
+ go-version: '1.21'
- name: Run commit format checker
- uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
+ uses: https://git.frostfs.info/TrueCloudLab/dco-go@v2
with:
from: 'origin/${{ github.event.pull_request.base.ref }}'
diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml
deleted file mode 100644
index fe91d65f9..000000000
--- a/.forgejo/workflows/oci-image.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-name: OCI image
-
-on:
- push:
- workflow_dispatch:
-
-jobs:
- image:
- name: Build container images
- runs-on: docker
- container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
- steps:
- - name: Clone git repo
- uses: actions/checkout@v3
-
- - name: Build OCI image
- run: make images
-
- - name: Push image to OCI registry
- run: |
- echo "$REGISTRY_PASSWORD" \
- | docker login --username truecloudlab --password-stdin git.frostfs.info
- make push-images
- if: >-
- startsWith(github.ref, 'refs/tags/v') &&
- (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
- env:
- REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}
diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml
deleted file mode 100644
index c2e293175..000000000
--- a/.forgejo/workflows/pre-commit.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-name: Pre-commit hooks
-
-on:
- pull_request:
- push:
- branches:
- - master
-
-jobs:
- precommit:
- name: Pre-commit
- env:
- # Skip pre-commit hooks which are executed by other actions.
- SKIP: make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt
- runs-on: ubuntu-22.04
- # If we use actions/setup-python from either Github or Gitea,
- # the line above fails with a cryptic error about not being able to find python.
- # So install everything manually.
- steps:
- - uses: actions/checkout@v3
- - name: Set up Go
- uses: actions/setup-go@v3
- with:
- go-version: 1.24
- - name: Set up Python
- run: |
- apt update
- apt install -y pre-commit
- - name: Run pre-commit
- run: pre-commit run --color=always --hook-stage manual --all-files
diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml
index f3f5432ce..f66a2c401 100644
--- a/.forgejo/workflows/tests.yml
+++ b/.forgejo/workflows/tests.yml
@@ -1,10 +1,5 @@
name: Tests and linters
-
-on:
- pull_request:
- push:
- branches:
- - master
+on: [pull_request]
jobs:
lint:
@@ -16,7 +11,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
+ go-version: '1.21'
cache: true
- name: Install linters
@@ -30,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- go_versions: [ '1.23', '1.24' ]
+ go_versions: [ '1.20', '1.21' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@@ -53,7 +48,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
+ go-version: '1.21'
cache: true
- name: Run tests
@@ -68,7 +63,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
+ go-version: '1.21'
cache: true
- name: Install staticcheck
@@ -76,41 +71,3 @@ jobs:
- name: Run staticcheck
run: make staticcheck-run
-
- gopls:
- name: gopls check
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
-
- - name: Set up Go
- uses: actions/setup-go@v3
- with:
- go-version: '1.22'
- cache: true
-
- - name: Install gopls
- run: make gopls-install
-
- - name: Run gopls
- run: make gopls-run
-
- fumpt:
- name: Run gofumpt
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
-
- - name: Set up Go
- uses: actions/setup-go@v3
- with:
- go-version: '1.24'
- cache: true
-
- - name: Install gofumpt
- run: make fumpt-install
-
- - name: Run gofumpt
- run: |
- make fumpt
- git diff --exit-code --quiet
diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml
index bc94792d8..8ea01749b 100644
--- a/.forgejo/workflows/vulncheck.yml
+++ b/.forgejo/workflows/vulncheck.yml
@@ -1,10 +1,5 @@
name: Vulncheck
-
-on:
- pull_request:
- push:
- branches:
- - master
+on: [pull_request]
jobs:
vulncheck:
@@ -18,8 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
- go-version: '1.24'
- check-latest: true
+ go-version: '1.21'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/.forgejo/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
similarity index 100%
rename from .forgejo/ISSUE_TEMPLATE/bug_report.md
rename to .github/ISSUE_TEMPLATE/bug_report.md
diff --git a/.forgejo/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
similarity index 100%
rename from .forgejo/ISSUE_TEMPLATE/config.yml
rename to .github/ISSUE_TEMPLATE/config.yml
diff --git a/.forgejo/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
similarity index 100%
rename from .forgejo/ISSUE_TEMPLATE/feature_request.md
rename to .github/ISSUE_TEMPLATE/feature_request.md
diff --git a/.forgejo/logo.svg b/.github/logo.svg
similarity index 100%
rename from .forgejo/logo.svg
rename to .github/logo.svg
diff --git a/.gitlint b/.gitlint
new file mode 100644
index 000000000..e7218ac53
--- /dev/null
+++ b/.gitlint
@@ -0,0 +1,11 @@
+[general]
+fail-without-commits=True
+regex-style-search=True
+contrib=CC1
+
+[title-match-regex]
+regex=^\[\#[0-9Xx]+\]\s
+
+[ignore-by-title]
+regex=^Release(.*)
+ignore=title-match-regex
diff --git a/.golangci.yml b/.golangci.yml
index e3ec09f60..ef99fc152 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,107 +1,83 @@
-version: "2"
+# This file contains all available configuration options
+# with their default values.
+
+# options for analysis running
run:
+ # timeout for analysis, e.g. 30s, 5m, default is 1m
+ timeout: 20m
+
+ # include test files or not, default is true
tests: false
+
+# output configuration options
output:
- formats:
- tab:
- path: stdout
- colors: false
+ # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
+ format: tab
+
+# all available settings of specific linters
+linters-settings:
+ exhaustive:
+ # indicates that switch statements are to be considered exhaustive if a
+ # 'default' case is present, even if all enum members aren't listed in the
+ # switch
+ default-signifies-exhaustive: true
+ govet:
+ # report about shadowed variables
+ check-shadowing: false
+ staticcheck:
+ checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.
+ funlen:
+ lines: 80 # default 60
+ statements: 60 # default 40
+ gocognit:
+ min-complexity: 40 # default 30
+ importas:
+ no-unaliased: true
+ no-extra-aliases: false
+ alias:
+ pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
+ alias: objectSDK
+ custom:
+ truecloudlab-linters:
+ path: bin/external_linters.so
+ original-url: git.frostfs.info/TrueCloudLab/linters.git
+ settings:
+ noliteral:
+ target-methods : ["reportFlushError", "reportError"]
+ disable-packages: ["codes", "err", "res","exec"]
+ constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+
linters:
- default: none
enable:
- - bidichk
- - containedctx
- - contextcheck
- - copyloopvar
- - durationcheck
- - errcheck
- - exhaustive
- - funlen
- - gocognit
- - gocritic
- - godot
- - importas
- - ineffassign
- - intrange
- - misspell
- - perfsprint
- - predeclared
- - protogetter
- - reassign
+ # mandatory linters
+ - govet
- revive
+
+ # some default golangci-lint linters
+ - errcheck
+ - gosimple
+ - godot
+ - ineffassign
- staticcheck
- - testifylint
- - truecloudlab-linters
- - unconvert
- - unparam
+ - typecheck
- unused
- - usetesting
- - whitespace
- settings:
- exhaustive:
- default-signifies-exhaustive: true
- funlen:
- lines: 80
- statements: 60
- gocognit:
- min-complexity: 40
- gocritic:
- disabled-checks:
- - ifElseChain
- importas:
- alias:
- - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
- alias: objectSDK
- no-unaliased: true
- no-extra-aliases: false
- staticcheck:
- checks:
- - all
- - -QF1002
- unused:
- field-writes-are-uses: false
- exported-fields-are-used: false
- local-variables-are-used: false
- custom:
- truecloudlab-linters:
- path: bin/linters/external_linters.so
- original-url: git.frostfs.info/TrueCloudLab/linters.git
- settings:
- noliteral:
- constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs
- disable-packages:
- - codes
- - err
- - res
- - exec
- target-methods:
- - reportFlushError
- - reportError
- exclusions:
- generated: lax
- presets:
- - comments
- - common-false-positives
- - legacy
- - std-error-handling
- paths:
- - third_party$
- - builtin$
- - examples$
-formatters:
- enable:
- - gci
+
+ # extra linters
+ - bidichk
+ - durationcheck
+ - exhaustive
+ - exportloopref
- gofmt
- goimports
- settings:
- gci:
- sections:
- - standard
- - default
- custom-order: true
- exclusions:
- generated: lax
- paths:
- - third_party$
- - builtin$
- - examples$
+ - misspell
+ - predeclared
+ - reassign
+ - whitespace
+ - containedctx
+ - funlen
+ - gocognit
+ - contextcheck
+ - importas
+ - truecloudlab-linters
+ disable-all: true
+ fast: false
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d2d90fa5c..5f20bdc84 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,8 +2,15 @@ ci:
autofix_prs: false
repos:
+ - repo: https://github.com/jorisroovers/gitlint
+ rev: v0.19.1
+ hooks:
+ - id: gitlint
+ stages: [commit-msg]
+ - id: gitlint-ci
+
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.5.0
+ rev: v4.4.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
@@ -16,10 +23,10 @@ repos:
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- id: end-of-file-fixer
- exclude: "(.key|.svg)$"
+ exclude: ".key$"
- repo: https://github.com/shellcheck-py/shellcheck-py
- rev: v0.9.0.6
+ rev: v0.9.0.5
hooks:
- id: shellcheck
@@ -35,16 +42,7 @@ repos:
hooks:
- id: go-unit-tests
name: go unit tests
- entry: make test GOFLAGS=''
- pass_filenames: false
- types: [go]
- language: system
-
- - repo: local
- hooks:
- - id: gofumpt
- name: gofumpt
- entry: make fumpt
+ entry: make test
pass_filenames: false
types: [go]
language: system
diff --git a/.woodpecker/pre-commit.yml b/.woodpecker/pre-commit.yml
new file mode 100644
index 000000000..bdf3402de
--- /dev/null
+++ b/.woodpecker/pre-commit.yml
@@ -0,0 +1,11 @@
+pipeline:
+ # Kludge for non-root containers under WoodPecker
+ fix-ownership:
+ image: alpine:latest
+ commands: chown -R 1234:1234 .
+
+ pre-commit:
+ image: git.frostfs.info/truecloudlab/frostfs-ci:v0.36
+ commands:
+ - export HOME="$(getent passwd $(id -u) | cut '-d:' -f6)"
+ - pre-commit run --hook-stage manual
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 92c84ab16..00837c79b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,129 +3,6 @@ Changelog for FrostFS Node
## [Unreleased]
-### Added
-### Changed
-### Fixed
-### Removed
-### Updated
-
-## [v0.44.0] - 2024-25-11 - Rongbuk
-
-### Added
-- Allow to prioritize nodes during GET traversal via attributes (#1439)
-- Add metrics for the frostfsid cache (#1464)
-- Customize constant attributes attached to every tracing span (#1488)
-- Manage additional keys in the `frostfsid` contract (#1505)
-- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519)
-
-### Changed
-- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396)
-- Print address in base58 format in `frostfs-adm morph policy set-admin` (#1515)
-
-### Fixed
-- Fix EC object search (#1408)
-- Fix EC object put when one of the nodes is unavailable (#1427)
-
-### Removed
-- Drop most of the eACL-related code (#1425)
-- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483)
-
-### Upgrading from v0.43.0
-The metabase schema has changed completely, resync is required.
-
-## [v0.42.0]
-
-### Added
-- Add audit logs for gRPC requests (#1184)
-- Add CLI command to convert eACL to APE (#1189)
-- Add `--await` flag to `control set-status` (#60)
-- `app_info` metric for binary version (#1154)
-- `--quiet` flag for healthcheck command (#1209)
-
-### Changed
-- Deprecate Container.SetEACL RPC (#1219)
-
-### Fixed
-- Take groups into account during APE processing (#1190)
-- Handle double SIGHUP correctly (#1145)
-- Handle empty filenames in tree listing (#1074)
-- Handle duplicate tree nodes in the split-brain scenario (#1234, #1251)
-- Remove APE pre-check in Object.GET/HEAD/RANGE RPC (#1249)
-- Delete EC gc marks and split info (#1257)
-- Do not search for non-existent objects on deletion (#1261)
-
-### Updated
-- Make putting EC chunks more robust (#1233)
-
-## [v0.41.0]
-
-### Added
-- Support mTLS for morph client (#1170)
-
-### Fixed
-- Update shard state metric during shard init (#1174)
-- Handle ENOSPC in blobovnicza (#1166)
-- Handle multiple split-infos for EC objects (#1163)
-- Set `Disabled` mode as the default for components (#1168)
-
-## [v0.40.0]
-
-### Added
-- Support EC chunk reconstruction in policer (#1129)
-- Support LOCK, DELETE and SEARCH methods on EC objects (#1147, 1144)
-- apemanager service to manage APE chains (#1105)
-
-### Fixed
-- Properly verify GetRangeHash response (#1083)
-- Send `MONOTONIC_USEC` in sdnotify on reload (#1135)
-
-### Updated
-- neo-go to `v0.106.0`
-
-## [v0.39.0]
-
-### Added
-- Preliminary erasure coding support (#1065, #1112, #1103, #1120)
-- TTL cache for blobovnicza tree (#1004)
-- Cache for frostfsid and policy contracts (#1117)
-- Writecache path to metric labels (#966)
-- Documentation for authentication mechanisms (#1097, #1104)
-- Metrics for metabase resync status (#1029)
-
-### Changed
-- Speed up metabase resync (#1024)
-
-### Fixed
-- Possible panic in GET_RANGE (#1077)
-
-### Updated
-- Minimum required Go version to 1.21
-
-## [v0.38.0]
-
-### Added
-- Add `trace_id` to logs in `frostfs-node` (#146)
-- Allow to forcefully remove container from IR (#733)
-- LOKI support (#740)
-- Allow sealing writecache (#569)
-- Support tree service in data evacuation (#947)
-- Use new policy engine mechanism for access control (#770, #804)
-- Log about active notary deposit waiting (#963)
-
-### Changed
-- Sort output in `frostfs-cli` subcommands (#333)
-- Send bootstrap query at each epoch tick (#721)
-- Do not retain garbage in fstree on systems supporting O_TMPFILE (#970)
-
-### Fixed
-- Handle synchronization failures better in tree service (#741)
-- Fix invalid batch size for iterator traversal in morph (#1000)
-
-### Updated
-- `neo-go` to `v0.105.0`
-
-## [v0.37.0]
-
### Added
- Support impersonate bearer token (#229)
- Change log level on SIGHUP for ir (#125)
diff --git a/CODEOWNERS b/CODEOWNERS
deleted file mode 100644
index d19c96a5c..000000000
--- a/CODEOWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
-.forgejo/.* @potyarkin
-Makefile @potyarkin
diff --git a/Makefile b/Makefile
index 575eaae6f..604115469 100755
--- a/Makefile
+++ b/Makefile
@@ -1,23 +1,15 @@
#!/usr/bin/make -f
SHELL = bash
-.SHELLFLAGS = -euo pipefail -c
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
+HUB_IMAGE ?= truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
-GO_VERSION ?= 1.23
-LINT_VERSION ?= 2.0.2
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
-PROTOC_VERSION ?= 25.0
-PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
-PROTOC_OS_VERSION=osx-x86_64
-ifeq ($(shell uname), Linux)
- PROTOC_OS_VERSION=linux-x86_64
-endif
-STATICCHECK_VERSION ?= 2025.1.1
+GO_VERSION ?= 1.21
+LINT_VERSION ?= 1.54.0
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.2
ARCH = amd64
BIN = bin
@@ -28,32 +20,18 @@ DIRS = $(BIN) $(RELEASE)
CMDS = $(notdir $(basename $(wildcard cmd/frostfs-*)))
BINS = $(addprefix $(BIN)/, $(CMDS))
-OUTPUT_LINT_DIR ?= $(abspath $(BIN))/linters
+# .deb package versioning
+OS_RELEASE = $(shell lsb_release -cs)
+PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
+ sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
+ sed "s/-/~/")-${OS_RELEASE}
+
+OUTPUT_LINT_DIR ?= $(shell pwd)/bin
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
-PROTOBUF_DIR ?= $(abspath $(BIN))/protobuf
-PROTOC_DIR ?= $(PROTOBUF_DIR)/protoc-v$(PROTOC_VERSION)
-PROTOGEN_FROSTFS_DIR ?= $(PROTOBUF_DIR)/protogen-$(PROTOGEN_FROSTFS_VERSION)
-STATICCHECK_DIR ?= $(abspath $(BIN))/staticcheck
-STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION)
-SOURCES = $(shell find . -type f -name "*.go" -print)
-
-GOFUMPT_VERSION ?= v0.7.0
-GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
-GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
-
-GOPLS_VERSION ?= v0.17.1
-GOPLS_DIR ?= $(abspath $(BIN))/gopls
-GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
-GOPLS_TEMP_FILE := $(shell mktemp)
-
-FROSTFS_CONTRACTS_PATH=$(abspath ./../frostfs-contract)
-LOCODE_DB_PATH=$(abspath ./.cache/locode_db)
-LOCODE_DB_VERSION=v0.4.0
-
-.PHONY: help all images dep clean fmts fumpt imports test lint docker/lint
- prepare-release pre-commit unpre-commit
+.PHONY: help all images dep clean fmts fmt imports test lint docker/lint
+ prepare-release debpackage pre-commit unpre-commit
# To build a specific binary, use it's name prefix with bin/ as a target
# For example `make bin/frostfs-node` will build only storage node binary
@@ -92,37 +70,24 @@ dep:
CGO_ENABLED=0 \
go mod tidy -v && echo OK
-# Build export-metrics
-export-metrics: dep
- @printf "⇒ Build export-metrics\n"
- CGO_ENABLED=0 \
- go build -v -trimpath -o bin/export-metrics ./scripts/export-metrics
-
# Regenerate proto files:
protoc:
- @if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \
- make protoc-install; \
- fi
- @for f in `find . -type f -name '*.proto' -not -path './bin/*'`; do \
+ @GOPRIVATE=github.com/TrueCloudLab go mod vendor
+ # Install specific version for protobuf lib
+ @go list -f '{{.Path}}/...@{{.Version}}' -m github.com/golang/protobuf | xargs go install -v
+ @GOBIN=$(abspath $(BIN)) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen
+ # Protoc generate
+ @for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
echo "⇒ Processing $$f "; \
- $(PROTOC_DIR)/bin/protoc \
- --proto_path=.:$(PROTOC_DIR)/include:/usr/local/include \
- --plugin=protoc-gen-go-frostfs=$(PROTOGEN_FROSTFS_DIR)/protogen \
+ protoc \
+ --proto_path=.:./vendor:/usr/local/include \
+ --plugin=protoc-gen-go-frostfs=$(BIN)/protogen \
--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
+ --go_out=. --go_opt=paths=source_relative \
--go-grpc_opt=require_unimplemented_servers=false \
--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
done
-
-# Install protoc
-protoc-install:
- @rm -rf $(PROTOBUF_DIR)
- @mkdir -p $(PROTOBUF_DIR)
- @echo "⇒ Installing protoc... "
- @wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
- @unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
- @rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip
- @echo "⇒ Instaling protogen FrostFS plugin..."
- @GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
+ rm -rf vendor
# Build FrostFS component's docker image
image-%:
@@ -140,15 +105,6 @@ images: image-storage image-ir image-cli image-adm
# Build dirty local Docker images
dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
-# Push FrostFS components' docker image to the registry
-push-image-%:
- @echo "⇒ Publish FrostFS $* docker image "
- @docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
-
-# Push all Docker images to the registry
-.PHONY: push-images
-push-images: push-image-storage push-image-ir push-image-cli push-image-adm
-
# Run `make %` in Golang container
docker/%:
docker run --rm -t \
@@ -160,109 +116,51 @@ docker/%:
# Run all code formatters
-fmts: fumpt imports
+fmts: fmt imports
+
+# Reformat code
+fmt:
+ @echo "⇒ Processing gofmt check"
+ @gofmt -s -w cmd/ pkg/ misc/
# Reformat imports
imports:
@echo "⇒ Processing goimports check"
@goimports -w cmd/ pkg/ misc/
-# Install gofumpt
-fumpt-install:
- @rm -rf $(GOFUMPT_DIR)
- @mkdir -p $(GOFUMPT_DIR)
- @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
-
-# Run gofumpt
-fumpt:
- @if [ ! -d "$(GOFUMPT_VERSION_DIR)" ]; then \
- make fumpt-install; \
- fi
- @echo "⇒ Processing gofumpt check"
- $(GOFUMPT_VERSION_DIR)/gofumpt -l -w cmd/ pkg/ misc/
-
# Run Unit Test with go test
-test: GOFLAGS ?= "-count=1"
test:
@echo "⇒ Running go test"
- @GOFLAGS="$(GOFLAGS)" go test ./...
+ @go test ./... -count=1
-# Install Gerrit commit-msg hook
-review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
-review-install:
- @git config remote.review.url \
- || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
- @mkdir -p $(GIT_HOOK_DIR)/
- @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
- @chmod +x $(GIT_HOOK_DIR)/commit-msg
- @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
- @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
-
-# Create a PR in Gerrit
-review: BRANCH ?= master
-review:
- @git push review HEAD:refs/for/$(BRANCH) \
- --push-option r=e.stratonikov@yadro.com \
- --push-option r=d.stepanov@yadro.com \
- --push-option r=an.nikiforov@yadro.com \
- --push-option r=a.arifullin@yadro.com \
- --push-option r=ekaterina.lebedeva@yadro.com \
- --push-option r=a.savchuk@yadro.com \
- --push-option r=a.chuprov@yadro.com
-
-# Run pre-commit
pre-commit-run:
@pre-commit run -a --hook-stage manual
# Install linters
-lint-install: $(BIN)
- @rm -rf $(OUTPUT_LINT_DIR)
- @mkdir -p $(OUTPUT_LINT_DIR)
+lint-install:
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
- @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION)
+ @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
@if [ ! -d "$(LINT_DIR)" ]; then \
- make lint-install; \
+ echo "Run make lint-install"; \
+ exit 1; \
fi
$(LINT_DIR)/golangci-lint run
# Install staticcheck
staticcheck-install:
- @rm -rf $(STATICCHECK_DIR)
- @mkdir -p $(STATICCHECK_DIR)
- @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
+ @go install honnef.co/go/tools/cmd/staticcheck@latest
# Run staticcheck
staticcheck-run:
- @if [ ! -d "$(STATICCHECK_VERSION_DIR)" ]; then \
- make staticcheck-install; \
- fi
- @$(STATICCHECK_VERSION_DIR)/staticcheck ./...
-
-# Install gopls
-gopls-install:
- @rm -rf $(GOPLS_DIR)
- @mkdir -p $(GOPLS_DIR)
- @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)
-
-# Run gopls
-gopls-run:
- @if [ ! -d "$(GOPLS_VERSION_DIR)" ]; then \
- make gopls-install; \
- fi
- $(GOPLS_VERSION_DIR)/gopls check $(SOURCES) 2>&1 >$(GOPLS_TEMP_FILE)
- @if [[ $$(wc -l < $(GOPLS_TEMP_FILE)) -ne 0 ]]; then \
- cat $(GOPLS_TEMP_FILE); \
- exit 1; \
- fi
- rm $(GOPLS_TEMP_FILE)
+ @staticcheck ./...
# Run linters in Docker
docker/lint:
@@ -286,37 +184,19 @@ version:
# Delete built artifacts
clean:
+ rm -rf vendor
rm -rf .cache
rm -rf $(BIN)
rm -rf $(RELEASE)
-# Download locode database
-locode-download:
- mkdir -p $(TMP_DIR)
- @wget -q -O ./$(TMP_DIR)/locode_db.gz 'https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/download/${LOCODE_DB_VERSION}/locode_db.gz'
- gzip -dfk ./$(TMP_DIR)/locode_db.gz
+# Package for Debian
+debpackage:
+ dch -b --package frostfs-node \
+ --controlmaint \
+ --newversion $(PKG_VERSION) \
+ --distribution $(OS_RELEASE) \
+ "Please see CHANGELOG.md for code changes for $(VERSION)"
+ dpkg-buildpackage --no-sign -b
-# Start dev environment
-env-up: all
- docker compose -f dev/docker-compose.yml up -d
- @if [ ! -d "$(FROSTFS_CONTRACTS_PATH)" ]; then \
- echo "Frostfs contracts not found"; exit 1; \
- fi
- ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
- ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
- --storage-wallet ./dev/storage/wallet01.json \
- --storage-wallet ./dev/storage/wallet02.json \
- --storage-wallet ./dev/storage/wallet03.json \
- --storage-wallet ./dev/storage/wallet04.json
-
- @if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
- make locode-download; \
- fi
- mkdir -p ./$(TMP_DIR)/state
- mkdir -p ./$(TMP_DIR)/storage
-
-# Shutdown dev environment
-env-down:
- docker compose -f dev/docker-compose.yml down -v
- rm -rf ./$(TMP_DIR)/state
- rm -rf ./$(TMP_DIR)/storage
+debclean:
+ dh clean
diff --git a/README.md b/README.md
index 0109ed0e5..c3a9bf09c 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
@@ -7,8 +7,9 @@
---
-[](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-node)
-
+[](https://goreportcard.com/report/github.com/TrueCloudLab/frostfs-node)
+
+
# Overview
@@ -32,8 +33,8 @@ manipulate large amounts of data without paying a prohibitive price.
FrostFS has a native [gRPC API](https://git.frostfs.info/TrueCloudLab/frostfs-api) and has
protocol gateways for popular protocols such as [AWS
-S3](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw),
-[HTTP](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw),
+S3](https://github.com/TrueCloudLab/frostfs-s3-gw),
+[HTTP](https://github.com/TrueCloudLab/frostfs-http-gw),
[FUSE](https://wikipedia.org/wiki/Filesystem_in_Userspace) and
[sFTP](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol) allowing
developers to integrate applications without rewriting their code.
@@ -44,11 +45,11 @@ Now, we only support GNU/Linux on amd64 CPUs with AVX/AVX2 instructions. More
platforms will be officially supported after release `1.0`.
The latest version of frostfs-node works with frostfs-contract
-[v0.19.2](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases/tag/v0.19.2).
+[v0.16.0](https://github.com/TrueCloudLab/frostfs-contract/releases/tag/v0.16.0).
# Building
-To make all binaries you need Go 1.22+ and `make`:
+To make all binaries you need Go 1.20+ and `make`:
```
make all
```
@@ -70,50 +71,11 @@ make docker/bin/frostfs- # build a specific binary
## Docker images
-To make docker images suitable for use in [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env/) use:
+To make docker images suitable for use in [frostfs-dev-env](https://github.com/TrueCloudLab/frostfs-dev-env/) use:
```
make images
```
-# Debugging
-
-## VSCode
-
-To run and debug single node cluster with VSCode:
-
-1. Clone and build [frostfs-contract](https://git.frostfs.info/TrueCloudLab/frostfs-contract) repository to the same directory level as `frostfs-node`. For example:
-
-```
-/
-├── src
- ├── frostfs-node
- └── frostfs-contract
-```
-See `frostfs-contract`'s README.md for build instructions.
-
-2. Copy `launch.json` and `tasks.json` from `dev/.vscode-example` directory to `.vscode` directory. If you already have such files in `.vscode` directory, then merge them manually.
-
-3. Go to **Run and Debug** (`Ctrl+Shift+D`) and start `IR+Storage node` configuration.
-
-4. To create container and put object into it run (container and object IDs will be different):
-
-```
-./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --await
-Enter password > <- press ENTER, the is no password for wallet
-CID: CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju
-
-./bin/frostfs-cli object put -r 127.0.0.1:8080 --wallet ./dev/wallet.json --file README.md --cid CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju
-Enter password >
- 4300 / 4300 [===========================================================================================================================================================================================================] 100.00% 0s
-[README.md] Object successfully stored
- OID: 78sohnudVMnPsczXqsTUcvezosan2YDNVZwDE8Kq5YwU
- CID: CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju
-
-./bin/frostfs-cli object get -r 127.0.0.1:8080 --wallet ./dev/wallet.json --cid CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju --oid 78sohnudVMnPsczXqsTUcvezosan2YDNVZwDE8Kq5YwU
-...
-
-```
-
# Contributing
Feel free to contribute to this project after reading the [contributing
@@ -124,7 +86,7 @@ the feature/topic you are going to implement.
# Credits
-FrostFS is maintained by [True Cloud Lab](https://git.frostfs.info/TrueCloudLab/) with the help and
+FrostFS is maintained by [True Cloud Lab](https://github.com/TrueCloudLab/) with the help and
contributions from community members.
Please see [CREDITS](CREDITS.md) for details.
diff --git a/VERSION b/VERSION
index 9052dab96..e1d6235d3 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v0.44.0
+v0.36.0
diff --git a/cmd/frostfs-adm/README.md b/cmd/frostfs-adm/README.md
index 3dfcc8781..f3dbe6950 100644
--- a/cmd/frostfs-adm/README.md
+++ b/cmd/frostfs-adm/README.md
@@ -56,8 +56,7 @@ credentials: # passwords for consensus node / alphabet wallets
#### Network deployment
- `generate-alphabet` generates a set of wallets for consensus and
- Alphabet nodes. The list of the name for alphabet wallets(no gaps between names allowed, order is important):
- - az, buky, vedi, glagoli, dobro, yest, zhivete, dzelo, zemlja, izhe, izhei, gerv, kako, ljudi, mislete, nash, on, pokoj, rtsi, slovo, tverdo, uk
+ Alphabet nodes.
- `init` initializes the sidechain by deploying smart contracts and
setting provided FrostFS network configuration.
diff --git a/cmd/frostfs-adm/docs/deploy.md b/cmd/frostfs-adm/docs/deploy.md
index b4b1ed8e4..3b9f4c98c 100644
--- a/cmd/frostfs-adm/docs/deploy.md
+++ b/cmd/frostfs-adm/docs/deploy.md
@@ -9,8 +9,8 @@ related configuration details.
To follow this guide you need:
- latest released version of [neo-go](https://github.com/nspcc-dev/neo-go/releases) (v0.97.2 at the moment),
-- latest released version of [frostfs-adm](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases) utility (v0.42.9 at the moment),
-- latest released version of compiled [frostfs-contract](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases) (v0.19.2 at the moment).
+- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases) utility (v0.25.1 at the moment),
+- latest released version of compiled [frostfs-contract](https://github.com/TrueCloudLab/frostfs-contract/releases) (v0.11.0 at the moment).
## Step 1: Prepare network configuration
@@ -34,8 +34,6 @@ alphabet-wallets: /home/user/deploy/alphabet-wallets
network:
max_object_size: 67108864
epoch_duration: 240
- max_ec_data_count: 12
- max_ec_parity_count: 4
fee:
candidate: 0
container: 0
@@ -64,11 +62,6 @@ alphabet-wallets: /home/user/deploy/alphabet-wallets
wallet[0]: hunter2
```
-This command generates wallets with the following names:
- - az, buky, vedi, glagoli, dobro, yest, zhivete, dzelo, zemlja, izhe, izhei, gerv, kako, ljudi, mislete, nash, on, pokoj, rtsi, slovo, tverdo, uk
-
-No gaps between names allowed, order is important.
-
Do not lose wallet files and network config. Store it in an encrypted backed up
storage.
diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go
index f194e97f5..2f1ae4cc5 100644
--- a/cmd/frostfs-adm/internal/commonflags/flags.go
+++ b/cmd/frostfs-adm/internal/commonflags/flags.go
@@ -11,40 +11,4 @@ const (
Verbose = "verbose"
VerboseShorthand = "v"
VerboseUsage = "Verbose output"
-
- EndpointFlag = "rpc-endpoint"
- EndpointFlagDesc = "N3 RPC node endpoint"
- EndpointFlagShort = "r"
-
- WalletPath = "wallet"
- WalletPathShorthand = "w"
- WalletPathUsage = "Path to the wallet"
-
- AlphabetWalletsFlag = "alphabet-wallets"
- AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"
-
- AdminWalletPath = "wallet-admin"
- AdminWalletUsage = "Path to the admin wallet"
-
- LocalDumpFlag = "local-dump"
- ProtoConfigPath = "protocol"
- ContractsInitFlag = "contracts"
- ContractsInitFlagDesc = "Path to archive with compiled FrostFS contracts (the default is to fetch the latest release from the official repository)"
- ContractsURLFlag = "contracts-url"
- ContractsURLFlagDesc = "URL to archive with compiled FrostFS contracts"
- EpochDurationInitFlag = "network.epoch_duration"
- MaxObjectSizeInitFlag = "network.max_object_size"
- MaxECDataCountFlag = "network.max_ec_data_count"
- MaxECParityCounFlag = "network.max_ec_parity_count"
- RefillGasAmountFlag = "gas"
- StorageWalletFlag = "storage-wallet"
- ContainerFeeInitFlag = "network.fee.container"
- ContainerAliasFeeInitFlag = "network.fee.container_alias"
- CandidateFeeInitFlag = "network.fee.candidate"
- WithdrawFeeInitFlag = "network.fee.withdraw"
- MaintenanceModeAllowedInitFlag = "network.maintenance_mode_allowed"
- HomomorphicHashDisabledInitFlag = "network.homomorphic_hash_disabled"
- CustomZoneFlag = "domain"
- AlphabetSizeFlag = "size"
- AllFlag = "all"
)
diff --git a/cmd/frostfs-adm/internal/modules/config/config.go b/cmd/frostfs-adm/internal/modules/config/config.go
index 69153f0d7..9dfab9f85 100644
--- a/cmd/frostfs-adm/internal/modules/config/config.go
+++ b/cmd/frostfs-adm/internal/modules/config/config.go
@@ -21,8 +21,6 @@ type configTemplate struct {
CandidateFee int
ContainerFee int
ContainerAliasFee int
- MaxECDataCount int
- MaxECParityCount int
WithdrawFee int
Glagolitics []string
HomomorphicHashDisabled bool
@@ -33,8 +31,6 @@ alphabet-wallets: {{ .AlphabetDir}}
network:
max_object_size: {{ .MaxObjectSize}}
epoch_duration: {{ .EpochDuration}}
- max_ec_data_count: {{ .MaxECDataCount}}
- max_ec_parity_count: {{ .MaxECParityCount}}
homomorphic_hash_disabled: {{ .HomomorphicHashDisabled}}
fee:
candidate: {{ .CandidateFee}}
@@ -54,12 +50,12 @@ func initConfig(cmd *cobra.Command, _ []string) error {
}
pathDir := filepath.Dir(configPath)
- err = os.MkdirAll(pathDir, 0o700)
+ err = os.MkdirAll(pathDir, 0700)
if err != nil {
return fmt.Errorf("create dir %s: %w", pathDir, err)
}
- f, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, 0o600)
+ f, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, 0600)
if err != nil {
return fmt.Errorf("open %s: %w", configPath, err)
}
@@ -110,8 +106,6 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
tmpl := configTemplate{
Endpoint: "https://neo.rpc.node:30333",
MaxObjectSize: 67108864, // 64 MiB
- MaxECDataCount: 12, // Tested with 16-node networks, assuming 12 data + 4 parity nodes.
- MaxECParityCount: 4, // Maximum 4 parity chunks, typically <= 3 for most policies.
EpochDuration: 240, // 1 hour with 15s per block
HomomorphicHashDisabled: false, // object homomorphic hash is enabled
CandidateFee: 100_0000_0000, // 100.0 GAS (Fixed8)
@@ -128,7 +122,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets")
var i innerring.GlagoliticLetter
- for i = range innerring.GlagoliticLetter(credSize) {
+ for i = 0; i < innerring.GlagoliticLetter(credSize); i++ {
tmpl.Glagolitics = append(tmpl.Glagolitics, i.String())
}
diff --git a/cmd/frostfs-adm/internal/modules/config/config_test.go b/cmd/frostfs-adm/internal/modules/config/config_test.go
index beb1210e1..3fa800a06 100644
--- a/cmd/frostfs-adm/internal/modules/config/config_test.go
+++ b/cmd/frostfs-adm/internal/modules/config/config_test.go
@@ -27,8 +27,6 @@ func TestGenerateConfigExample(t *testing.T) {
require.Equal(t, "https://neo.rpc.node:30333", v.GetString("rpc-endpoint"))
require.Equal(t, filepath.Join(appDir, "alphabet-wallets"), v.GetString("alphabet-wallets"))
require.Equal(t, 67108864, v.GetInt("network.max_object_size"))
- require.Equal(t, 12, v.GetInt("network.max_ec_data_count"))
- require.Equal(t, 4, v.GetInt("network.max_ec_parity_count"))
require.Equal(t, 240, v.GetInt("network.epoch_duration"))
require.Equal(t, 10000000000, v.GetInt("network.fee.candidate"))
require.Equal(t, 1000, v.GetInt("network.fee.container"))
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go
deleted file mode 100644
index d67b70d2a..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/root.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package maintenance
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie"
- "github.com/spf13/cobra"
-)
-
-var RootCmd = &cobra.Command{
- Use: "maintenance",
- Short: "Section for maintenance commands",
-}
-
-func init() {
- RootCmd.AddCommand(zombie.Cmd)
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go
deleted file mode 100644
index 1b66889aa..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package zombie
-
-import (
- "crypto/ecdsa"
- "fmt"
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/cli/flags"
- "github.com/nspcc-dev/neo-go/cli/input"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey {
- keyDesc := viper.GetString(walletFlag)
- if keyDesc == "" {
- return &nodeconfig.Key(appCfg).PrivateKey
- }
- data, err := os.ReadFile(keyDesc)
- commonCmd.ExitOnErr(cmd, "open wallet file: %w", err)
-
- priv, err := keys.NewPrivateKeyFromBytes(data)
- if err != nil {
- w, err := wallet.NewWalletFromFile(keyDesc)
- commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err)
- return fromWallet(cmd, w, viper.GetString(addressFlag))
- }
- return &priv.PrivateKey
-}
-
-func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey {
- var (
- addr util.Uint160
- err error
- )
-
- if addrStr == "" {
- addr = w.GetChangeAddress()
- } else {
- addr, err = flags.ParseAddress(addrStr)
- commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err)
- }
-
- acc := w.GetAccount(addr)
- if acc == nil {
- commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr))
- }
-
- pass, err := getPassword()
- commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err)
-
- commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams()))
-
- return &acc.PrivateKey().PrivateKey
-}
-
-func getPassword() (string, error) {
- // this check allows empty passwords
- if viper.IsSet("password") {
- return viper.GetString("password"), nil
- }
-
- return input.ReadPassword("Enter password > ")
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go
deleted file mode 100644
index f73f33db9..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package zombie
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-func list(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- storageEngine := newEngine(cmd, appCfg)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
- var containerID *cid.ID
- if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" {
- containerID = &cid.ID{}
- commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
- }
-
- commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error {
- if containerID != nil && a.Container() != *containerID {
- return nil
- }
- cmd.Println(a.EncodeToString())
- return nil
- }))
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go
deleted file mode 100644
index cd3a64499..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package zombie
-
-import (
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
- nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- "github.com/spf13/cobra"
-)
-
-func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client {
- addresses := morphconfig.RPCEndpoint(appCfg)
- if len(addresses) == 0 {
- commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found"))
- }
- key := nodeconfig.Key(appCfg)
- cli, err := client.New(cmd.Context(),
- key,
- client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
- client.WithEndpoints(addresses...),
- client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
- )
- commonCmd.ExitOnErr(cmd, "create morph client: %w", err)
- return cli
-}
-
-func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client {
- hs, err := morph.NNSContractAddress(client.NNSContainerContractName)
- commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err)
- cc, err := cntClient.NewFromMorph(morph, hs, 0)
- commonCmd.ExitOnErr(cmd, "create morph container client: %w", err)
- return cc
-}
-
-func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client {
- hs, err := morph.NNSContractAddress(client.NNSNetmapContractName)
- commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err)
- cli, err := netmapClient.NewFromMorph(morph, hs, 0)
- commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err)
- return cli
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go
deleted file mode 100644
index 27f83aec7..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package zombie
-
-import (
- "context"
- "fmt"
- "math"
- "os"
- "path/filepath"
- "strings"
- "sync"
-
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-type quarantine struct {
- // mtx protects current field.
- mtx sync.Mutex
- current int
- trees []*fstree.FSTree
-}
-
-func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine {
- var paths []string
- for _, sh := range engineInfo.Shards {
- var storagePaths []string
- for _, st := range sh.BlobStorInfo.SubStorages {
- storagePaths = append(storagePaths, st.Path)
- }
- if len(storagePaths) == 0 {
- continue
- }
- paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine"))
- }
- q, err := newQuarantine(paths)
- commonCmd.ExitOnErr(cmd, "create quarantine: %w", err)
- return q
-}
-
-func commonPath(paths []string) string {
- if len(paths) == 0 {
- return ""
- }
- if len(paths) == 1 {
- return paths[0]
- }
- minLen := math.MaxInt
- for _, p := range paths {
- if len(p) < minLen {
- minLen = len(p)
- }
- }
-
- var sb strings.Builder
- for i := range minLen {
- for _, path := range paths[1:] {
- if paths[0][i] != path[i] {
- return sb.String()
- }
- }
- sb.WriteByte(paths[0][i])
- }
- return sb.String()
-}
-
-func newQuarantine(paths []string) (*quarantine, error) {
- var q quarantine
- for i := range paths {
- f := fstree.New(
- fstree.WithDepth(1),
- fstree.WithDirNameLen(1),
- fstree.WithPath(paths[i]),
- fstree.WithPerm(os.ModePerm),
- )
- if err := f.Open(mode.ComponentReadWrite); err != nil {
- return nil, fmt.Errorf("open fstree %s: %w", paths[i], err)
- }
- if err := f.Init(); err != nil {
- return nil, fmt.Errorf("init fstree %s: %w", paths[i], err)
- }
- q.trees = append(q.trees, f)
- }
- return &q, nil
-}
-
-func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
- for i := range q.trees {
- res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a})
- if err != nil {
- continue
- }
- return res.Object, nil
- }
- return nil, &apistatus.ObjectNotFound{}
-}
-
-func (q *quarantine) Delete(ctx context.Context, a oid.Address) error {
- for i := range q.trees {
- _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a})
- if err != nil {
- continue
- }
- return nil
- }
- return &apistatus.ObjectNotFound{}
-}
-
-func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error {
- data, err := obj.Marshal()
- if err != nil {
- return err
- }
-
- var prm common.PutPrm
- prm.Address = objectcore.AddressOf(obj)
- prm.Object = obj
- prm.RawData = data
-
- q.mtx.Lock()
- current := q.current
- q.current = (q.current + 1) % len(q.trees)
- q.mtx.Unlock()
-
- _, err = q.trees[current].Put(ctx, prm)
- return err
-}
-
-func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error {
- var prm common.IteratePrm
- prm.Handler = func(elem common.IterationElement) error {
- return f(elem.Address)
- }
- for i := range q.trees {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- _, err := q.trees[i].Iterate(ctx, prm)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go
deleted file mode 100644
index 0b8f2f172..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package zombie
-
-import (
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-func remove(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- storageEngine := newEngine(cmd, appCfg)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
-
- var containerID cid.ID
- cidStr, _ := cmd.Flags().GetString(cidFlag)
- commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
-
- var objectID *oid.ID
- oidStr, _ := cmd.Flags().GetString(oidFlag)
- if oidStr != "" {
- objectID = &oid.ID{}
- commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
- }
-
- if objectID != nil {
- var addr oid.Address
- addr.SetContainer(containerID)
- addr.SetObject(*objectID)
- removeObject(cmd, q, addr)
- } else {
- commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
- if addr.Container() != containerID {
- return nil
- }
- removeObject(cmd, q, addr)
- return nil
- }))
- }
-}
-
-func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) {
- err := q.Delete(cmd.Context(), addr)
- if errors.Is(err, new(apistatus.ObjectNotFound)) {
- return
- }
- commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err)
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go
deleted file mode 100644
index f179c7c2d..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package zombie
-
-import (
- "crypto/sha256"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-func restore(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- storageEngine := newEngine(cmd, appCfg)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
- morphClient := createMorphClient(cmd, appCfg)
- cnrCli := createContainerClient(cmd, morphClient)
-
- var containerID cid.ID
- cidStr, _ := cmd.Flags().GetString(cidFlag)
- commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
-
- var objectID *oid.ID
- oidStr, _ := cmd.Flags().GetString(oidFlag)
- if oidStr != "" {
- objectID = &oid.ID{}
- commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
- }
-
- if objectID != nil {
- var addr oid.Address
- addr.SetContainer(containerID)
- addr.SetObject(*objectID)
- restoreObject(cmd, storageEngine, q, addr, cnrCli)
- } else {
- commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
- if addr.Container() != containerID {
- return nil
- }
- restoreObject(cmd, storageEngine, q, addr, cnrCli)
- return nil
- }))
- }
-}
-
-func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) {
- obj, err := q.Get(cmd.Context(), addr)
- commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err)
- rawCID := make([]byte, sha256.Size)
-
- cid := addr.Container()
- cid.Encode(rawCID)
- cnr, err := cnrCli.Get(cmd.Context(), rawCID)
- commonCmd.ExitOnErr(cmd, "get container: %w", err)
-
- putPrm := engine.PutPrm{
- Object: obj,
- IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value),
- }
- commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm))
- commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr))
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
deleted file mode 100644
index c8fd9e5e5..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package zombie
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const (
- flagBatchSize = "batch-size"
- flagBatchSizeUsage = "Objects iteration batch size"
- cidFlag = "cid"
- cidFlagUsage = "Container ID"
- oidFlag = "oid"
- oidFlagUsage = "Object ID"
- walletFlag = "wallet"
- walletFlagShorthand = "w"
- walletFlagUsage = "Path to the wallet or binary key"
- addressFlag = "address"
- addressFlagUsage = "Address of wallet account"
- moveFlag = "move"
- moveFlagUsage = "Move objects from storage engine to quarantine"
-)
-
-var (
- Cmd = &cobra.Command{
- Use: "zombie",
- Short: "Zombie objects related commands",
- }
- scanCmd = &cobra.Command{
- Use: "scan",
- Short: "Scan storage engine for zombie objects and move them to quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag))
- _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag))
- _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize))
- _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag))
- },
- Run: scan,
- }
- listCmd = &cobra.Command{
- Use: "list",
- Short: "List zombie objects from quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
- },
- Run: list,
- }
- restoreCmd = &cobra.Command{
- Use: "restore",
- Short: "Restore zombie objects from quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
- _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
- },
- Run: restore,
- }
- removeCmd = &cobra.Command{
- Use: "remove",
- Short: "Remove zombie objects from quarantine",
- Long: "",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
- _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
- _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
- _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
- },
- Run: remove,
- }
-)
-
-func init() {
- initScanCmd()
- initListCmd()
- initRestoreCmd()
- initRemoveCmd()
-}
-
-func initScanCmd() {
- Cmd.AddCommand(scanCmd)
-
- scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage)
- scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage)
- scanCmd.Flags().String(addressFlag, "", addressFlagUsage)
- scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage)
-}
-
-func initListCmd() {
- Cmd.AddCommand(listCmd)
-
- listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- listCmd.Flags().String(cidFlag, "", cidFlagUsage)
-}
-
-func initRestoreCmd() {
- Cmd.AddCommand(restoreCmd)
-
- restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- restoreCmd.Flags().String(cidFlag, "", cidFlagUsage)
- restoreCmd.Flags().String(oidFlag, "", oidFlagUsage)
-}
-
-func initRemoveCmd() {
- Cmd.AddCommand(removeCmd)
-
- removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
- removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
- removeCmd.Flags().String(cidFlag, "", cidFlagUsage)
- removeCmd.Flags().String(oidFlag, "", oidFlagUsage)
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
deleted file mode 100644
index 268ec4911..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package zombie
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/sha256"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
- clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
- "golang.org/x/sync/errgroup"
-)
-
-func scan(cmd *cobra.Command, _ []string) {
- configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
- configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- batchSize, _ := cmd.Flags().GetUint32(flagBatchSize)
- if batchSize == 0 {
- commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value"))
- }
- move, _ := cmd.Flags().GetBool(moveFlag)
-
- storageEngine := newEngine(cmd, appCfg)
- morphClient := createMorphClient(cmd, appCfg)
- cnrCli := createContainerClient(cmd, morphClient)
- nmCli := createNetmapClient(cmd, morphClient)
- q := createQuarantine(cmd, storageEngine.DumpInfo())
- pk := getPrivateKey(cmd, appCfg)
-
- epoch, err := nmCli.Epoch(cmd.Context())
- commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err)
-
- nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch)
- commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err)
-
- cmd.Printf("Epoch: %d\n", nm.Epoch())
- cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes()))
-
- ps := &processStatus{
- statusCount: make(map[status]uint64),
- }
-
- stopCh := make(chan struct{})
- start := time.Now()
- var wg sync.WaitGroup
- wg.Add(2)
- go func() {
- defer wg.Done()
- tick := time.NewTicker(time.Second)
- defer tick.Stop()
- for {
- select {
- case <-cmd.Context().Done():
- return
- case <-stopCh:
- return
- case <-tick.C:
- fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start))
- }
- }
- }()
- go func() {
- defer wg.Done()
- err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move)
- close(stopCh)
- }()
- wg.Wait()
- commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err)
-
- cmd.Println()
- cmd.Println("Status description:")
- cmd.Println("undefined -- nothing is clear")
- cmd.Println("found -- object is found in cluster")
- cmd.Println("quarantine -- object is not found in cluster")
- cmd.Println()
- for status, count := range ps.statusCount {
- cmd.Printf("Status: %s, Count: %d\n", status, count)
- }
-}
-
-type status string
-
-const (
- statusUndefined status = "undefined"
- statusFound status = "found"
- statusQuarantine status = "quarantine"
-)
-
-func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) {
- rawCID := make([]byte, sha256.Size)
- cid := obj.Address.Container()
- cid.Encode(rawCID)
-
- cnr, err := cnrCli.Get(ctx, rawCID)
- if err != nil {
- var errContainerNotFound *apistatus.ContainerNotFound
- if errors.As(err, &errContainerNotFound) {
- // Policer will deal with this object.
- return statusFound, nil
- }
- return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err)
- }
- nm, err := nmCli.NetMap(ctx)
- if err != nil {
- return statusUndefined, fmt.Errorf("read netmap from morph: %w", err)
- }
-
- nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID)
- if err != nil {
- // Not enough nodes, check all netmap nodes.
- nodes = append([][]netmap.NodeInfo{}, nm.Nodes())
- }
-
- objID := obj.Address.Object()
- cnrID := obj.Address.Container()
- local := true
- raw := false
- if obj.ECInfo != nil {
- objID = obj.ECInfo.ParentID
- local = false
- raw = true
- }
- prm := clientSDK.PrmObjectHead{
- ObjectID: &objID,
- ContainerID: &cnrID,
- Local: local,
- Raw: raw,
- }
-
- var ni clientCore.NodeInfo
- for i := range nodes {
- for j := range nodes[i] {
- if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil {
- return statusUndefined, fmt.Errorf("parse node info: %w", err)
- }
- c, err := cc.Get(ni)
- if err != nil {
- continue
- }
- res, err := c.ObjectHead(ctx, prm)
- if err != nil {
- var errECInfo *objectSDK.ECInfoError
- if raw && errors.As(err, &errECInfo) {
- return statusFound, nil
- }
- continue
- }
- if err := apistatus.ErrFromStatus(res.Status()); err != nil {
- continue
- }
- return statusFound, nil
- }
- }
-
- if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 {
- return statusFound, nil
- }
- return statusQuarantine, nil
-}
-
-func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus,
- appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool,
-) error {
- cc := cache.NewSDKClientCache(cache.ClientCacheOpts{
- DialTimeout: apiclientconfig.DialTimeout(appCfg),
- StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
- ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
- Key: pk,
- AllowExternal: apiclientconfig.AllowExternal(appCfg),
- })
- ctx := cmd.Context()
-
- var cursor *engine.Cursor
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- var prm engine.ListWithCursorPrm
- prm.WithCursor(cursor)
- prm.WithCount(batchSize)
-
- res, err := storageEngine.ListWithCursor(ctx, prm)
- if err != nil {
- if errors.Is(err, engine.ErrEndOfListing) {
- return nil
- }
- return fmt.Errorf("list with cursor: %w", err)
- }
-
- cursor = res.Cursor()
- addrList := res.AddressList()
- eg, egCtx := errgroup.WithContext(ctx)
- eg.SetLimit(int(batchSize))
-
- for i := range addrList {
- addr := addrList[i]
- eg.Go(func() error {
- result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr)
- if err != nil {
- return fmt.Errorf("check object %s status: %w", addr.Address, err)
- }
- ps.add(result)
-
- if !move && result == statusQuarantine {
- cmd.Println(addr)
- return nil
- }
-
- if result == statusQuarantine {
- return moveToQuarantine(egCtx, storageEngine, q, addr.Address)
- }
- return nil
- })
- }
- if err := eg.Wait(); err != nil {
- return fmt.Errorf("process objects batch: %w", err)
- }
- }
-}
-
-func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error {
- var getPrm engine.GetPrm
- getPrm.WithAddress(addr)
- res, err := storageEngine.Get(ctx, getPrm)
- if err != nil {
- return fmt.Errorf("get object %s from storage engine: %w", addr, err)
- }
-
- if err := q.Put(ctx, res.Object()); err != nil {
- return fmt.Errorf("put object %s to quarantine: %w", addr, err)
- }
-
- var delPrm engine.DeletePrm
- delPrm.WithForceRemoval()
- delPrm.WithAddress(addr)
-
- if err = storageEngine.Delete(ctx, delPrm); err != nil {
- return fmt.Errorf("delete object %s from storage engine: %w", addr, err)
- }
- return nil
-}
-
-type processStatus struct {
- guard sync.RWMutex
- statusCount map[status]uint64
- count uint64
-}
-
-func (s *processStatus) add(st status) {
- s.guard.Lock()
- defer s.guard.Unlock()
- s.statusCount[st]++
- s.count++
-}
-
-func (s *processStatus) total() uint64 {
- s.guard.RLock()
- defer s.guard.RUnlock()
- return s.count
-}
diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
deleted file mode 100644
index 5be34d502..000000000
--- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package zombie
-
-import (
- "context"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
- shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
- blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
- fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "github.com/panjf2000/ants/v2"
- "github.com/spf13/cobra"
- "go.etcd.io/bbolt"
- "go.uber.org/zap"
-)
-
-func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine {
- ngOpts := storageEngineOptions(c)
- shardOpts := shardOptions(cmd, c)
- e := engine.New(ngOpts...)
- for _, opts := range shardOpts {
- _, err := e.AddShard(cmd.Context(), opts...)
- commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
- }
- commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context()))
- commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context()))
- return e
-}
-
-func storageEngineOptions(c *config.Config) []engine.Option {
- return []engine.Option{
- engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
- engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)),
- }
-}
-
-func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option {
- var result [][]shard.Option
- err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error {
- result = append(result, getShardOpts(cmd, c, sh))
- return nil
- })
- commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
- return result
-}
-
-func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option {
- wc, wcEnabled := getWriteCacheOpts(sh)
- return []shard.Option{
- shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- shard.WithRefillMetabase(sh.RefillMetabase()),
- shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()),
- shard.WithMode(sh.Mode()),
- shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...),
- shard.WithMetaBaseOptions(getMetabaseOpts(sh)...),
- shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...),
- shard.WithWriteCache(wcEnabled),
- shard.WithWriteCacheOptions(wc),
- shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()),
- shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()),
- shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()),
- shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()),
- shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
- pool, err := ants.NewPool(sz)
- commonCmd.ExitOnErr(cmd, "init GC pool: %w", err)
- return pool
- }),
- shard.WithLimiter(qos.NewNoopLimiter()),
- }
-}
-
-func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) {
- if wc := sh.WriteCache(); wc != nil && wc.Enabled() {
- var result []writecache.Option
- result = append(result,
- writecache.WithPath(wc.Path()),
- writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()),
- writecache.WithMaxObjectSize(wc.MaxObjectSize()),
- writecache.WithFlushWorkersCount(wc.WorkerCount()),
- writecache.WithMaxCacheSize(wc.SizeLimit()),
- writecache.WithMaxCacheCount(wc.CountLimit()),
- writecache.WithNoSync(wc.NoSync()),
- writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- writecache.WithQoSLimiter(qos.NewNoopLimiter()),
- )
- return result, true
- }
- return nil, false
-}
-
-func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option {
- var piloramaOpts []pilorama.Option
- if config.BoolSafe(c.Sub("tree"), "enabled") {
- pr := sh.Pilorama()
- piloramaOpts = append(piloramaOpts,
- pilorama.WithPath(pr.Path()),
- pilorama.WithPerm(pr.Perm()),
- pilorama.WithNoSync(pr.NoSync()),
- pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
- pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
- )
- }
- return piloramaOpts
-}
-
-func getMetabaseOpts(sh *shardconfig.Config) []meta.Option {
- return []meta.Option{
- meta.WithPath(sh.Metabase().Path()),
- meta.WithPermissions(sh.Metabase().BoltDB().Perm()),
- meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()),
- meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()),
- meta.WithBoltDBOptions(&bbolt.Options{
- Timeout: 100 * time.Millisecond,
- }),
- meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- meta.WithEpochState(&epochState{}),
- }
-}
-
-func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option {
- result := []blobstor.Option{
- blobstor.WithCompression(sh.Compression()),
- blobstor.WithStorages(getSubStorages(ctx, sh)),
- blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- }
-
- return result
-}
-
-func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage {
- var ss []blobstor.SubStorage
- for _, storage := range sh.BlobStor().Storages() {
- switch storage.Type() {
- case blobovniczatree.Type:
- sub := blobovniczaconfig.From((*config.Config)(storage))
- blobTreeOpts := []blobovniczatree.Option{
- blobovniczatree.WithRootPath(storage.Path()),
- blobovniczatree.WithPermissions(storage.Perm()),
- blobovniczatree.WithBlobovniczaSize(sub.Size()),
- blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
- blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
- blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()),
- blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()),
- blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()),
- blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()),
- blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()),
- blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())),
- blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())),
- blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()),
- }
-
- ss = append(ss, blobstor.SubStorage{
- Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
- Policy: func(_ *objectSDK.Object, data []byte) bool {
- return uint64(len(data)) < sh.SmallSizeLimit()
- },
- })
- case fstree.Type:
- sub := fstreeconfig.From((*config.Config)(storage))
- fstreeOpts := []fstree.Option{
- fstree.WithPath(storage.Path()),
- fstree.WithPerm(storage.Perm()),
- fstree.WithDepth(sub.Depth()),
- fstree.WithNoSync(sub.NoSync()),
- fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
- }
-
- ss = append(ss, blobstor.SubStorage{
- Storage: fstree.New(fstreeOpts...),
- Policy: func(_ *objectSDK.Object, _ []byte) bool {
- return true
- },
- })
- default:
- // should never happen, that has already
- // been handled: when the config was read
- }
- }
- return ss
-}
-
-type epochState struct{}
-
-func (epochState) CurrentEpoch() uint64 {
- return 0
-}
diff --git a/cmd/frostfs-adm/internal/modules/metabase/root.go b/cmd/frostfs-adm/internal/modules/metabase/root.go
deleted file mode 100644
index 5b21ed273..000000000
--- a/cmd/frostfs-adm/internal/modules/metabase/root.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package metabase
-
-import "github.com/spf13/cobra"
-
-// RootCmd is a root command of config section.
-var RootCmd = &cobra.Command{
- Use: "metabase",
- Short: "Section for metabase commands",
-}
-
-func init() {
- RootCmd.AddCommand(UpgradeCmd)
-
- initUpgradeCommand()
-}
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
deleted file mode 100644
index c0c290c5e..000000000
--- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package metabase
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
- shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
- morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
- nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- morphcontainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "github.com/spf13/cobra"
- "golang.org/x/sync/errgroup"
-)
-
-const (
- noCompactFlag = "no-compact"
-)
-
-var (
- errNoPathsFound = errors.New("no metabase paths found")
- errNoMorphEndpointsFound = errors.New("no morph endpoints found")
- errUpgradeFailed = errors.New("upgrade failed")
-)
-
-var UpgradeCmd = &cobra.Command{
- Use: "upgrade",
- Short: "Upgrade metabase to latest version",
- RunE: upgrade,
-}
-
-func upgrade(cmd *cobra.Command, _ []string) error {
- configFile, err := cmd.Flags().GetString(commonflags.ConfigFlag)
- if err != nil {
- return err
- }
- configDir, err := cmd.Flags().GetString(commonflags.ConfigDirFlag)
- if err != nil {
- return err
- }
- appCfg := config.New(configFile, configDir, config.EnvPrefix)
- paths, err := getMetabasePaths(appCfg)
- if err != nil {
- return err
- }
- if len(paths) == 0 {
- return errNoPathsFound
- }
- cmd.Println("found", len(paths), "metabases:")
- for i, path := range paths {
- cmd.Println(i+1, ":", path)
- }
- mc, err := createMorphClient(cmd.Context(), appCfg)
- if err != nil {
- return err
- }
- defer mc.Close()
- civ, err := createContainerInfoProvider(mc)
- if err != nil {
- return err
- }
- noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
- result := make(map[string]bool)
- var resultGuard sync.Mutex
- eg, ctx := errgroup.WithContext(cmd.Context())
- for _, path := range paths {
- eg.Go(func() error {
- var success bool
- cmd.Println("upgrading metabase", path, "...")
- if err := meta.Upgrade(ctx, path, !noCompact, civ, func(a ...any) {
- cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
- }); err != nil {
- cmd.Println("error: failed to upgrade metabase", path, ":", err)
- } else {
- success = true
- cmd.Println("metabase", path, "upgraded successfully")
- }
- resultGuard.Lock()
- result[path] = success
- resultGuard.Unlock()
- return nil
- })
- }
- if err := eg.Wait(); err != nil {
- return err
- }
- allSuccess := true
- for mb, ok := range result {
- if ok {
- cmd.Println(mb, ": success")
- } else {
- cmd.Println(mb, ": failed")
- allSuccess = false
- }
- }
- if allSuccess {
- return nil
- }
- return errUpgradeFailed
-}
-
-func getMetabasePaths(appCfg *config.Config) ([]string, error) {
- var paths []string
- if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
- paths = append(paths, sc.Metabase().Path())
- return nil
- }); err != nil {
- return nil, fmt.Errorf("get metabase paths: %w", err)
- }
- return paths, nil
-}
-
-func createMorphClient(ctx context.Context, appCfg *config.Config) (*client.Client, error) {
- addresses := morphconfig.RPCEndpoint(appCfg)
- if len(addresses) == 0 {
- return nil, errNoMorphEndpointsFound
- }
- key := nodeconfig.Key(appCfg)
- cli, err := client.New(ctx,
- key,
- client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
- client.WithEndpoints(addresses...),
- client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
- )
- if err != nil {
- return nil, fmt.Errorf("create morph client:%w", err)
- }
- return cli, nil
-}
-
-func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, error) {
- sh, err := cli.NNSContractAddress(client.NNSContainerContractName)
- if err != nil {
- return nil, fmt.Errorf("resolve container contract hash: %w", err)
- }
- cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
- if err != nil {
- return nil, fmt.Errorf("create morph container client: %w", err)
- }
- return container.NewInfoProvider(func() (container.Source, error) {
- return morphcontainer.AsContainerSource(cc), nil
- }), nil
-}
-
-func initUpgradeCommand() {
- flags := UpgradeCmd.Flags()
- flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file")
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
deleted file mode 100644
index 1960faab4..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package ape
-
-import (
- "bytes"
- "encoding/json"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const (
- jsonFlag = "json"
- jsonFlagDesc = "Output rule chains in JSON format"
- addrAdminFlag = "addr"
- addrAdminDesc = "The address of the admins wallet"
-)
-
-var (
- addRuleChainCmd = &cobra.Command{
- Use: "add-rule-chain",
- Short: "Add rule chain",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- },
- Run: addRuleChain,
- }
-
- removeRuleChainCmd = &cobra.Command{
- Use: "rm-rule-chain",
- Short: "Remove rule chain",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- },
- Run: removeRuleChain,
- }
-
- listRuleChainsCmd = &cobra.Command{
- Use: "list-rule-chains",
- Short: "List rule chains",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: listRuleChains,
- }
-
- setAdminCmd = &cobra.Command{
- Use: "set-admin",
- Short: "Set admin",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- },
- Run: setAdmin,
- }
-
- getAdminCmd = &cobra.Command{
- Use: "get-admin",
- Short: "Get admin",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: getAdmin,
- }
-
- listTargetsCmd = &cobra.Command{
- Use: "list-targets",
- Short: "List targets",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: listTargets,
- }
-)
-
-func initAddRuleChainCmd() {
- Cmd.AddCommand(addRuleChainCmd)
-
- addRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- addRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
- addRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- addRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
-
- addRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
- _ = addRuleChainCmd.MarkFlagRequired(apeCmd.ChainIDFlag)
- addRuleChainCmd.Flags().StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc)
- addRuleChainCmd.Flags().String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc)
- addRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
- addRuleChainCmd.MarkFlagsMutuallyExclusive(apeCmd.RuleFlag, apeCmd.PathFlag)
-}
-
-func initRemoveRuleChainCmd() {
- Cmd.AddCommand(removeRuleChainCmd)
-
- removeRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- removeRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
- removeRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- removeRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag)
- removeRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
- removeRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
- removeRuleChainCmd.Flags().Bool(commonflags.AllFlag, false, "Remove all chains for target")
- removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, apeCmd.ChainIDFlag)
-}
-
-func initListRuleChainsCmd() {
- Cmd.AddCommand(listRuleChainsCmd)
-
- listRuleChainsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- listRuleChainsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
- _ = listRuleChainsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- listRuleChainsCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- listRuleChainsCmd.Flags().Bool(jsonFlag, false, jsonFlagDesc)
- listRuleChainsCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc)
-}
-
-func initSetAdminCmd() {
- Cmd.AddCommand(setAdminCmd)
-
- setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- setAdminCmd.Flags().String(addrAdminFlag, "", addrAdminDesc)
- _ = setAdminCmd.MarkFlagRequired(addrAdminFlag)
-}
-
-func initGetAdminCmd() {
- Cmd.AddCommand(getAdminCmd)
-
- getAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-}
-
-func initListTargetsCmd() {
- Cmd.AddCommand(listTargetsCmd)
-
- listTargetsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- listTargetsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc)
- _ = listTargetsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
-}
-
-func addRuleChain(cmd *cobra.Command, _ []string) {
- chain := apeCmd.ParseChain(cmd)
- target := parseTarget(cmd)
- pci, ac := newPolicyContractInterface(cmd)
- h, vub, err := pci.AddMorphRuleChain(apeCmd.ParseChainName(cmd), target, chain)
- cmd.Println("Waiting for transaction to persist...")
- _, err = ac.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "add rule chain error: %w", err)
- cmd.Println("Rule chain added successfully")
-}
-
-func removeRuleChain(cmd *cobra.Command, _ []string) {
- target := parseTarget(cmd)
- pci, ac := newPolicyContractInterface(cmd)
- removeAll, _ := cmd.Flags().GetBool(commonflags.AllFlag)
- if removeAll {
- h, vub, err := pci.RemoveMorphRuleChainsByTarget(apeCmd.ParseChainName(cmd), target)
- cmd.Println("Waiting for transaction to persist...")
- _, err = ac.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
- cmd.Println("All chains for target removed successfully")
- } else {
- chainID := apeCmd.ParseChainID(cmd)
- h, vub, err := pci.RemoveMorphRuleChain(apeCmd.ParseChainName(cmd), target, chainID)
- cmd.Println("Waiting for transaction to persist...")
- _, err = ac.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err)
- cmd.Println("Rule chain removed successfully")
- }
-}
-
-func listRuleChains(cmd *cobra.Command, _ []string) {
- target := parseTarget(cmd)
- pci, _ := newPolicyContractReaderInterface(cmd)
- chains, err := pci.ListMorphRuleChains(apeCmd.ParseChainName(cmd), target)
- commonCmd.ExitOnErr(cmd, "list rule chains error: %w", err)
- if len(chains) == 0 {
- return
- }
-
- toJSON, _ := cmd.Flags().GetBool(jsonFlag)
- if toJSON {
- prettyJSONFormat(cmd, chains)
- } else {
- for _, c := range chains {
- apeCmd.PrintHumanReadableAPEChain(cmd, c)
- }
- }
-}
-
-func setAdmin(cmd *cobra.Command, _ []string) {
- s, _ := cmd.Flags().GetString(addrAdminFlag)
- addr, err := address.StringToUint160(s)
- commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err)
- pci, ac := newPolicyContractInterface(cmd)
- h, vub, err := pci.SetAdmin(addr)
- cmd.Println("Waiting for transaction to persist...")
- _, err = ac.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "can't set admin: %w", err)
- cmd.Println("Admin set successfully")
-}
-
-func getAdmin(cmd *cobra.Command, _ []string) {
- pci, _ := newPolicyContractReaderInterface(cmd)
- addr, err := pci.GetAdmin()
- commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err)
- cmd.Println(address.Uint160ToString(addr))
-}
-
-func listTargets(cmd *cobra.Command, _ []string) {
- typ := apeCmd.ParseTargetType(cmd)
- pci, inv := newPolicyContractReaderInterface(cmd)
-
- sid, it, err := pci.ListTargetsIterator(typ)
- commonCmd.ExitOnErr(cmd, "list targets error: %w", err)
- items, err := inv.TraverseIterator(sid, &it, 0)
- for err == nil && len(items) != 0 {
- for _, item := range items {
- bts, err := item.TryBytes()
- commonCmd.ExitOnErr(cmd, "list targets error: %w", err)
- if len(bts) == 0 {
- cmd.Println("(no name)")
- } else {
- cmd.Println(string(bts))
- }
- }
- items, err = inv.TraverseIterator(sid, &it, 0)
- commonCmd.ExitOnErr(cmd, "unable to list targets: %w", err)
- }
-}
-
-func prettyJSONFormat(cmd *cobra.Command, chains []*apechain.Chain) {
- wr := bytes.NewBufferString("")
- data, err := json.Marshal(chains)
- if err == nil {
- err = json.Indent(wr, data, "", " ")
- }
- commonCmd.ExitOnErr(cmd, "print rule chain error: %w", err)
- cmd.Println(wr)
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
deleted file mode 100644
index 3c332c3f0..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package ape
-
-import (
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var errUnknownTargetType = errors.New("unknown target type")
-
-func parseTarget(cmd *cobra.Command) policyengine.Target {
- typ := apeCmd.ParseTargetType(cmd)
- name, _ := cmd.Flags().GetString(apeCmd.TargetNameFlag)
- switch typ {
- case policyengine.Namespace:
- if name == "root" {
- name = ""
- }
- return policyengine.NamespaceTarget(name)
- case policyengine.Container:
- var cnr cid.ID
- commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
- return policyengine.ContainerTarget(name)
- case policyengine.User:
- return policyengine.UserTarget(name)
- case policyengine.Group:
- return policyengine.GroupTarget(name)
- default:
- commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
- }
- panic("unreachable")
-}
-
-// invokerAdapter adapats invoker.Invoker to ContractStorageInvoker interface.
-type invokerAdapter struct {
- *invoker.Invoker
- rpcActor invoker.RPCInvoke
-}
-
-func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
- return n.rpcActor
-}
-
-func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
- c, err := helper.NewRemoteClient(viper.GetViper())
- commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
-
- inv := invoker.New(c, nil)
- r := management.NewReader(inv)
- nnsCs, err := helper.GetContractByID(r, 1)
- commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
-
- ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
- commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
-
- invokerAdapter := &invokerAdapter{
- Invoker: inv,
- rpcActor: c,
- }
-
- return morph.NewContractStorageReader(invokerAdapter, ch), inv
-}
-
-func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
- c, err := helper.NewRemoteClient(viper.GetViper())
- commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
-
- walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
- ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
- commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
-
- var ch util.Uint160
- r := management.NewReader(ac.Invoker)
- nnsCs, err := helper.GetContractByID(r, 1)
- commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
-
- ch, err = helper.NNSResolveHash(ac.Invoker, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
- commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
-
- return morph.NewContractStorage(ac, ch), ac
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/root.go b/cmd/frostfs-adm/internal/modules/morph/ape/root.go
deleted file mode 100644
index a4746cd2c..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/ape/root.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package ape
-
-import "github.com/spf13/cobra"
-
-var Cmd = &cobra.Command{
- Use: "ape",
- Short: "Section for APE configuration commands",
-}
-
-func init() {
- initAddRuleChainCmd()
- initRemoveRuleChainCmd()
- initListRuleChainsCmd()
- initSetAdminCmd()
- initGetAdminCmd()
- initListTargetsCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance.go
similarity index 84%
rename from cmd/frostfs-adm/internal/modules/morph/balance/balance.go
rename to cmd/frostfs-adm/internal/modules/morph/balance.go
index 23dba14f4..6debc50b9 100644
--- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go
+++ b/cmd/frostfs-adm/internal/modules/morph/balance.go
@@ -1,4 +1,4 @@
-package balance
+package morph
import (
"crypto/elliptic"
@@ -7,9 +7,6 @@ import (
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -19,7 +16,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/rolemgmt"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
@@ -52,7 +48,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
nmHash util.Uint160
)
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := getN3Client(viper.GetViper())
if err != nil {
return err
}
@@ -60,13 +56,12 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
inv := invoker.New(c, nil)
if dumpStorage || dumpAlphabet || dumpProxy {
- r := management.NewReader(inv)
- nnsCs, err = helper.GetContractByID(r, 1)
+ nnsCs, err = c.GetContractStateByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}
- nmHash, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.NetmapContract))
+ nmHash, err = nnsResolveHash(inv, nnsCs.Hash, netmapContract+".frostfs")
if err != nil {
return fmt.Errorf("can't get netmap contract hash: %w", err)
}
@@ -139,7 +134,7 @@ func printStorageNodeBalances(cmd *cobra.Command, inv *invoker.Invoker, nmHash u
}
func printProxyContractBalance(cmd *cobra.Command, inv *invoker.Invoker, nnsHash util.Uint160) error {
- h, err := helper.NNSResolveHash(inv, nnsHash, helper.DomainOf(constants.ProxyContract))
+ h, err := nnsResolveHash(inv, nnsHash, proxyContract+".frostfs")
if err != nil {
return fmt.Errorf("can't get hash of the proxy contract: %w", err)
}
@@ -153,16 +148,18 @@ func printProxyContractBalance(cmd *cobra.Command, inv *invoker.Invoker, nnsHash
return nil
}
-func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *invoker.Invoker, count int, nnsHash util.Uint160) error {
+func printAlphabetContractBalances(cmd *cobra.Command, c Client, inv *invoker.Invoker, count int, nnsHash util.Uint160) error {
alphaList := make([]accBalancePair, count)
w := io.NewBufBinWriter()
for i := range alphaList {
emit.AppCall(w.BinWriter, nnsHash, "resolve", callflag.ReadOnly,
- helper.GetAlphabetNNSDomain(i),
+ getAlphabetNNSDomain(i),
int64(nns.TXT))
}
- assert.NoError(w.Err)
+ if w.Err != nil {
+ panic(w.Err)
+ }
alphaRes, err := c.InvokeScript(w.Bytes(), nil)
if err != nil {
@@ -170,7 +167,7 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv
}
for i := range alphaList {
- h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i])
+ h, err := parseNNSResolveResult(alphaRes.Stack[i])
if err != nil {
return fmt.Errorf("can't fetch the alphabet contract #%d hash: %w", i, err)
}
@@ -185,7 +182,7 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv
return nil
}
-func fetchIRNodes(c helper.Client, desigHash util.Uint160) ([]accBalancePair, error) {
+func fetchIRNodes(c Client, desigHash util.Uint160) ([]accBalancePair, error) {
inv := invoker.New(c, nil)
height, err := c.GetBlockCount()
@@ -193,7 +190,7 @@ func fetchIRNodes(c helper.Client, desigHash util.Uint160) ([]accBalancePair, er
return nil, fmt.Errorf("can't get block height: %w", err)
}
- arr, err := helper.GetDesignatedByRole(inv, desigHash, noderoles.NeoFSAlphabet, height)
+ arr, err := getDesignatedByRole(inv, desigHash, noderoles.NeoFSAlphabet, height)
if err != nil {
return nil, errors.New("can't fetch list of IR nodes from the netmap contract")
}
@@ -225,7 +222,9 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan
for i := range accounts {
emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash)
}
- assert.NoError(w.Err)
+ if w.Err != nil {
+ panic(w.Err)
+ }
res, err := c.Run(w.Bytes())
if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) {
diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/root.go b/cmd/frostfs-adm/internal/modules/morph/balance/root.go
deleted file mode 100644
index 3be712367..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/balance/root.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package balance
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var DumpCmd = &cobra.Command{
- Use: "dump-balances",
- Short: "Dump GAS balances",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: dumpBalances,
-}
-
-func initDumpBalancesCmd() {
- DumpCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- DumpCmd.Flags().BoolP(dumpBalancesStorageFlag, "s", false, "Dump balances of storage nodes from the current netmap")
- DumpCmd.Flags().BoolP(dumpBalancesAlphabetFlag, "a", false, "Dump balances of alphabet contracts")
- DumpCmd.Flags().BoolP(dumpBalancesProxyFlag, "p", false, "Dump balances of the proxy contract")
- DumpCmd.Flags().Bool(dumpBalancesUseScriptHashFlag, false, "Use script-hash format for addresses")
-}
-
-func init() {
- initDumpBalancesCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config.go
similarity index 54%
rename from cmd/frostfs-adm/internal/modules/morph/config/config.go
rename to cmd/frostfs-adm/internal/modules/morph/config.go
index c17fb62ff..a86829aaa 100644
--- a/cmd/frostfs-adm/internal/modules/morph/config/config.go
+++ b/cmd/frostfs-adm/internal/modules/morph/config.go
@@ -1,4 +1,4 @@
-package config
+package morph
import (
"bytes"
@@ -10,12 +10,9 @@ import (
"strings"
"text/tabwriter"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
@@ -26,20 +23,19 @@ import (
const forceConfigSet = "force"
func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := getN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
inv := invoker.New(c, nil)
- r := management.NewReader(inv)
- cs, err := helper.GetContractByID(r, 1)
+ cs, err := c.GetContractStateByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}
- nmHash, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.NetmapContract))
+ nmHash, err := nnsResolveHash(inv, cs.Hash, netmapContract+".frostfs")
if err != nil {
return fmt.Errorf("can't get netmap contract hash: %w", err)
}
@@ -52,7 +48,7 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
buf := bytes.NewBuffer(nil)
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
- m, err := helper.ParseConfigFromNetmapContract(arr)
+ m, err := parseConfigFromNetmapContract(arr)
if err != nil {
return err
}
@@ -60,19 +56,18 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
switch k {
case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
- netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
- netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
+ netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
nbuf := make([]byte, 8)
- copy(nbuf, v)
+ copy(nbuf[:], v)
n := binary.LittleEndian.Uint64(nbuf)
- _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
if len(v) == 0 || len(v) > 1 {
- return helper.InvalidConfigValueErr(k)
+ return invalidConfigValueErr(k)
}
- _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
default:
- _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
}
}
@@ -82,44 +77,35 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
return nil
}
-func SetConfigCmd(cmd *cobra.Command, args []string) error {
+func setConfigCmd(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return errors.New("empty config pairs")
}
- wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
+ wCtx, err := newInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
}
- r := management.NewReader(wCtx.ReadOnlyInvoker)
- cs, err := helper.GetContractByID(r, 1)
+ cs, err := wCtx.Client.GetContractStateByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}
- nmHash, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, helper.DomainOf(constants.NetmapContract))
+ nmHash, err := nnsResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, netmapContract+".frostfs")
if err != nil {
return fmt.Errorf("can't get netmap contract hash: %w", err)
}
forceFlag, _ := cmd.Flags().GetBool(forceConfigSet)
+
bw := io.NewBufBinWriter()
- prm := make(map[string]any)
for _, arg := range args {
k, v, err := parseConfigPair(arg, forceFlag)
if err != nil {
return err
}
- prm[k] = v
- }
-
- if err := validateConfig(prm, forceFlag); err != nil {
- return err
- }
-
- for k, v := range prm {
// In NeoFS this is done via Notary contract. Here, however, we can form the
// transaction locally. The first `nil` argument is required only for notary
// disabled environment which is not supported by that command.
@@ -129,56 +115,12 @@ func SetConfigCmd(cmd *cobra.Command, args []string) error {
}
}
- err = wCtx.SendConsensusTx(bw.Bytes())
+ err = wCtx.sendConsensusTx(bw.Bytes())
if err != nil {
return err
}
- return wCtx.AwaitTx()
-}
-
-const maxECSum = 256
-
-func validateConfig(args map[string]any, forceFlag bool) error {
- var sumEC int64
- _, okData := args[netmap.MaxECDataCountConfig]
- _, okParity := args[netmap.MaxECParityCountConfig]
- if okData != okParity {
- return fmt.Errorf("both %s and %s must be present in the configuration",
- netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig)
- }
-
- for k, v := range args {
- switch k {
- case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
- netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
- netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
- netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
- value, ok := v.(int64)
- if !ok {
- return fmt.Errorf("%s has an invalid type. Expected type: int", k)
- }
-
- if value < 0 {
- return fmt.Errorf("%s must be >= 0, got %v", k, v)
- }
-
- if k == netmap.MaxECDataCountConfig || k == netmap.MaxECParityCountConfig {
- sumEC += value
- }
- case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
- _, ok := v.(bool)
- if !ok {
- return fmt.Errorf("%s has an invalid type. Expected type: bool", k)
- }
- }
- }
-
- if sumEC > maxECSum && !forceFlag {
- return fmt.Errorf("the sum of %s and %s must be <= %d, got %d",
- netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig, maxECSum, sumEC)
- }
- return nil
+ return wCtx.awaitTx()
}
func parseConfigPair(kvStr string, force bool) (key string, val any, err error) {
@@ -193,8 +135,7 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error)
switch key {
case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
- netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
- netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
+ netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
val, err = strconv.ParseInt(valRaw, 10, 64)
if err != nil {
err = fmt.Errorf("could not parse %s's value '%s' as int: %w", key, valRaw, err)
@@ -217,3 +158,7 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error)
return
}
+
+func invalidConfigValueErr(key string) error {
+ return fmt.Errorf("invalid %s config value from netmap contract", key)
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config_test.go b/cmd/frostfs-adm/internal/modules/morph/config/config_test.go
deleted file mode 100644
index c6d5b2827..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/config/config_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package config
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- "github.com/stretchr/testify/require"
-)
-
-func Test_ValidateConfig(t *testing.T) {
- testArgs := make(map[string]any)
-
- testArgs[netmap.MaxECDataCountConfig] = int64(11)
- require.Error(t, validateConfig(testArgs, false))
-
- testArgs[netmap.MaxECParityCountConfig] = int64(256)
- require.Error(t, validateConfig(testArgs, false))
- require.NoError(t, validateConfig(testArgs, true))
-
- testArgs[netmap.MaxECParityCountConfig] = int64(-1)
- require.Error(t, validateConfig(testArgs, false))
-
- testArgs[netmap.MaxECParityCountConfig] = int64(55)
- require.NoError(t, validateConfig(testArgs, false))
-
- testArgs[netmap.HomomorphicHashingDisabledKey] = "1"
- require.Error(t, validateConfig(testArgs, false))
-
- testArgs[netmap.HomomorphicHashingDisabledKey] = true
- require.NoError(t, validateConfig(testArgs, false))
-
- testArgs["not-well-known-configuration-key"] = "key"
- require.NoError(t, validateConfig(testArgs, false))
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/config/root.go b/cmd/frostfs-adm/internal/modules/morph/config/root.go
deleted file mode 100644
index 6b9094de0..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/config/root.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package config
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var (
- SetCmd = &cobra.Command{
- Use: "set-config key1=val1 [key2=val2 ...]",
- DisableFlagsInUseLine: true,
- Short: "Add/update global config value in the FrostFS network",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Args: cobra.MinimumNArgs(1),
- RunE: SetConfigCmd,
- }
-
- DumpCmd = &cobra.Command{
- Use: "dump-config",
- Short: "Dump FrostFS network config",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: dumpNetworkConfig,
- }
-)
-
-func initSetConfigCmd() {
- SetCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- SetCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- SetCmd.Flags().Bool(forceConfigSet, false, "Force setting not well-known configuration key")
- SetCmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
-}
-
-func initDumpNetworkConfigCmd() {
- DumpCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-}
-
-func init() {
- initSetConfigCmd()
- initDumpNetworkConfigCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/constants/const.go b/cmd/frostfs-adm/internal/modules/morph/constants/const.go
deleted file mode 100644
index be4041a86..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/constants/const.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package constants
-
-import "time"
-
-const (
- ConsensusAccountName = "consensus"
-
- // MaxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size
- // of the invocation script.
- // See: https://github.com/nspcc-dev/neo-go/blob/740488f7f35e367eaa99a71c0a609c315fe2b0fc/pkg/core/transaction/witness.go#L10
- MaxAlphabetNodes = 22
-
- SingleAccountName = "single"
- CommitteeAccountName = "committee"
-
- NNSContract = "nns"
- FrostfsContract = "frostfs" // not deployed in side-chain.
- ProcessingContract = "processing" // not deployed in side-chain.
- AlphabetContract = "alphabet"
- BalanceContract = "balance"
- ContainerContract = "container"
- FrostfsIDContract = "frostfsid"
- NetmapContract = "netmap"
- PolicyContract = "policy"
- ProxyContract = "proxy"
-
- ContractWalletFilename = "contract.json"
- ContractWalletPasswordKey = "contract"
-
- FrostfsOpsEmail = "ops@frostfs.info"
- NNSRefreshDefVal = int64(3600)
- NNSRetryDefVal = int64(600)
- NNSTtlDefVal = int64(3600)
-
- DefaultExpirationTime = 10 * 365 * 24 * time.Hour / time.Second
-
- DeployMethodName = "deploy"
- UpdateMethodName = "update"
-
- TestContractPassword = "grouppass"
-)
-
-var (
- ContractList = []string{
- BalanceContract,
- ContainerContract,
- FrostfsIDContract,
- NetmapContract,
- PolicyContract,
- ProxyContract,
- }
-
- FullContractList = append([]string{
- FrostfsContract,
- ProcessingContract,
- NNSContract,
- AlphabetContract,
- }, ContractList...)
-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container.go
similarity index 74%
rename from cmd/frostfs-adm/internal/modules/morph/container/container.go
rename to cmd/frostfs-adm/internal/modules/morph/container.go
index 79685f111..687d7e84e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container.go
@@ -1,21 +1,16 @@
-package container
+package morph
import (
"encoding/json"
"errors"
"fmt"
"os"
- "slices"
"sort"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -27,19 +22,18 @@ import (
var errInvalidContainerResponse = errors.New("invalid response from container contract")
-func getContainerContractHash(cmd *cobra.Command, inv *invoker.Invoker) (util.Uint160, error) {
+func getContainerContractHash(cmd *cobra.Command, inv *invoker.Invoker, c Client) (util.Uint160, error) {
s, err := cmd.Flags().GetString(containerContractFlag)
var ch util.Uint160
if err == nil {
ch, err = util.Uint160DecodeStringLE(s)
}
if err != nil {
- r := management.NewReader(inv)
- nnsCs, err := helper.GetContractByID(r, 1)
+ nnsCs, err := c.GetContractStateByID(1)
if err != nil {
return util.Uint160{}, fmt.Errorf("can't get NNS contract state: %w", err)
}
- ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.ContainerContract))
+ ch, err = nnsResolveHash(inv, nnsCs.Hash, containerContract+".frostfs")
if err != nil {
return util.Uint160{}, err
}
@@ -77,14 +71,14 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("invalid filename: %w", err)
}
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := getN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
inv := invoker.New(c, nil)
- ch, err := getContainerContractHash(cmd, inv)
+ ch, err := getContainerContractHash(cmd, inv, c)
if err != nil {
return fmt.Errorf("unable to get contaract hash: %w", err)
}
@@ -140,12 +134,13 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invoker, id []byte) (*Container, error) {
bw.Reset()
emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id)
+ emit.AppCall(bw.BinWriter, ch, "eACL", callflag.All, id)
res, err := inv.Run(bw.Bytes())
if err != nil {
return nil, fmt.Errorf("can't get container info: %w", err)
}
- if len(res.Stack) != 1 {
- return nil, fmt.Errorf("%w: expected 1 items on stack", errInvalidContainerResponse)
+ if len(res.Stack) != 2 {
+ return nil, fmt.Errorf("%w: expected 2 items on stack", errInvalidContainerResponse)
}
cnt := new(Container)
@@ -154,18 +149,26 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
}
+ ea := new(EACL)
+ err = ea.FromStackItem(res.Stack[1])
+ if err != nil {
+ return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
+ }
+ if len(ea.Value) != 0 {
+ cnt.EACL = ea
+ }
return cnt, nil
}
func listContainers(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := getN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
inv := invoker.New(c, nil)
- ch, err := getContainerContractHash(cmd, inv)
+ ch, err := getContainerContractHash(cmd, inv, c)
if err != nil {
return fmt.Errorf("unable to get contaract hash: %w", err)
}
@@ -187,11 +190,11 @@ func restoreContainers(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("invalid filename: %w", err)
}
- wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
+ wCtx, err := newInitializeContext(cmd, viper.GetViper())
if err != nil {
return err
}
- defer wCtx.Close()
+ defer wCtx.close()
containers, err := parseContainers(filename)
if err != nil {
@@ -213,10 +216,10 @@ func restoreContainers(cmd *cobra.Command, _ []string) error {
return err
}
- return wCtx.AwaitTx()
+ return wCtx.awaitTx()
}
-func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd *cobra.Command, wCtx *helper.InitializeContext, ch util.Uint160) error {
+func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd *cobra.Command, wCtx *initializeContext, ch util.Uint160) error {
bw := io.NewBufBinWriter()
for _, cnt := range containers {
hv := hash.Sha256(cnt.Value)
@@ -236,9 +239,11 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
putContainer(bw, ch, cnt)
- assert.NoError(bw.Err)
+ if bw.Err != nil {
+ panic(bw.Err)
+ }
- if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
+ if err := wCtx.sendConsensusTx(bw.Bytes()); err != nil {
return err
}
}
@@ -248,9 +253,13 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
func putContainer(bw *io.BufBinWriter, ch util.Uint160, cnt Container) {
emit.AppCall(bw.BinWriter, ch, "put", callflag.All,
cnt.Value, cnt.Signature, cnt.PublicKey, cnt.Token)
+ if ea := cnt.EACL; ea != nil {
+ emit.AppCall(bw.BinWriter, ch, "setEACL", callflag.All,
+ ea.Value, ea.Signature, ea.PublicKey, ea.Token)
+ }
}
-func isContainerRestored(cmd *cobra.Command, wCtx *helper.InitializeContext, containerHash util.Uint160, bw *io.BufBinWriter, hashValue util.Uint256) (bool, error) {
+func isContainerRestored(cmd *cobra.Command, wCtx *initializeContext, containerHash util.Uint160, bw *io.BufBinWriter, hashValue util.Uint256) (bool, error) {
emit.AppCall(bw.BinWriter, containerHash, "get", callflag.All, hashValue.BytesBE())
res, err := wCtx.Client.InvokeScript(bw.Bytes(), nil)
if err != nil {
@@ -288,14 +297,13 @@ func parseContainers(filename string) ([]Container, error) {
return containers, nil
}
-func fetchContainerContractHash(wCtx *helper.InitializeContext) (util.Uint160, error) {
- r := management.NewReader(wCtx.ReadOnlyInvoker)
- nnsCs, err := helper.GetContractByID(r, 1)
+func fetchContainerContractHash(wCtx *initializeContext) (util.Uint160, error) {
+ nnsCs, err := wCtx.Client.GetContractStateByID(1)
if err != nil {
return util.Uint160{}, fmt.Errorf("can't get NNS contract state: %w", err)
}
- ch, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, nnsCs.Hash, helper.DomainOf(constants.ContainerContract))
+ ch, err := nnsResolveHash(wCtx.ReadOnlyInvoker, nnsCs.Hash, containerContract+".frostfs")
if err != nil {
return util.Uint160{}, fmt.Errorf("can't fetch container contract hash: %w", err)
}
@@ -308,6 +316,15 @@ type Container struct {
Signature []byte `json:"signature"`
PublicKey []byte `json:"public_key"`
Token []byte `json:"token"`
+ EACL *EACL `json:"eacl"`
+}
+
+// EACL represents extended ACL struct in contract storage.
+type EACL struct {
+ Value []byte `json:"value"`
+ Signature []byte `json:"signature"`
+ PublicKey []byte `json:"public_key"`
+ Token []byte `json:"token"`
}
// ToStackItem implements stackitem.Convertible.
@@ -354,6 +371,50 @@ func (c *Container) FromStackItem(item stackitem.Item) error {
return nil
}
+// ToStackItem implements stackitem.Convertible.
+func (c *EACL) ToStackItem() (stackitem.Item, error) {
+ return stackitem.NewStruct([]stackitem.Item{
+ stackitem.NewByteArray(c.Value),
+ stackitem.NewByteArray(c.Signature),
+ stackitem.NewByteArray(c.PublicKey),
+ stackitem.NewByteArray(c.Token),
+ }), nil
+}
+
+// FromStackItem implements stackitem.Convertible.
+func (c *EACL) FromStackItem(item stackitem.Item) error {
+ arr, ok := item.Value().([]stackitem.Item)
+ if !ok || len(arr) != 4 {
+ return errors.New("invalid stack item type")
+ }
+
+ value, err := arr[0].TryBytes()
+ if err != nil {
+ return errors.New("invalid eACL value")
+ }
+
+ sig, err := arr[1].TryBytes()
+ if err != nil {
+ return errors.New("invalid eACL signature")
+ }
+
+ pub, err := arr[2].TryBytes()
+ if err != nil {
+ return errors.New("invalid eACL public key")
+ }
+
+ tok, err := arr[3].TryBytes()
+ if err != nil {
+ return errors.New("invalid eACL token")
+ }
+
+ c.Value = value
+ c.Signature = sig
+ c.PublicKey = pub
+ c.Token = tok
+ return nil
+}
+
// getCIDFilterFunc returns filtering function for container IDs.
// Raw byte slices are used because it works with structures returned
// from contract.
@@ -380,7 +441,7 @@ func getCIDFilterFunc(cmd *cobra.Command) (func([]byte) bool, error) {
var id cid.ID
id.SetSHA256(v)
idStr := id.EncodeToString()
- _, found := slices.BinarySearch(rawIDs, idStr)
- return found
+ n := sort.Search(len(rawIDs), func(i int) bool { return rawIDs[i] >= idStr })
+ return n < len(rawIDs) && rawIDs[n] == idStr
}, nil
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/root.go b/cmd/frostfs-adm/internal/modules/morph/container/root.go
deleted file mode 100644
index 2b314ab09..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/container/root.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package container
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const (
- containerDumpFlag = "dump"
- containerContractFlag = "container-contract"
- containerIDsFlag = "cid"
-)
-
-var (
- DumpCmd = &cobra.Command{
- Use: "dump-containers",
- Short: "Dump FrostFS containers to file",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: dumpContainers,
- }
-
- RestoreCmd = &cobra.Command{
- Use: "restore-containers",
- Short: "Restore FrostFS containers from file",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: restoreContainers,
- }
-
- ListCmd = &cobra.Command{
- Use: "list-containers",
- Short: "List FrostFS containers",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: listContainers,
- }
-)
-
-func initListContainersCmd() {
- ListCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- ListCmd.Flags().String(containerContractFlag, "", "Container contract hash (for networks without NNS)")
-}
-
-func initRestoreContainersCmd() {
- RestoreCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- RestoreCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- RestoreCmd.Flags().String(containerDumpFlag, "", "File to restore containers from")
- RestoreCmd.Flags().StringSlice(containerIDsFlag, nil, "Containers to restore")
-}
-
-func initDumpContainersCmd() {
- DumpCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- DumpCmd.Flags().String(containerDumpFlag, "", "File where to save dumped containers")
- DumpCmd.Flags().String(containerContractFlag, "", "Container contract hash (for networks without NNS)")
- DumpCmd.Flags().StringSlice(containerIDsFlag, nil, "Containers to dump")
-}
-
-func init() {
- initDumpContainersCmd()
- initRestoreContainersCmd()
- initListContainersCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/root.go b/cmd/frostfs-adm/internal/modules/morph/contract/root.go
deleted file mode 100644
index 9bad2bd66..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/contract/root.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package contract
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var (
- DumpHashesCmd = &cobra.Command{
- Use: "dump-hashes",
- Short: "Dump deployed contract hashes",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: dumpContractHashes,
- }
- UpdateCmd = &cobra.Command{
- Use: "update-contracts",
- Short: "Update FrostFS contracts",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: updateContracts,
- }
-)
-
-func initDumpContractHashesCmd() {
- DumpHashesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- DumpHashesCmd.Flags().String(commonflags.CustomZoneFlag, "", "Custom zone to search.")
-}
-
-func initUpdateContractsCmd() {
- UpdateCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- UpdateCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- UpdateCmd.Flags().String(commonflags.ContractsInitFlag, "", commonflags.ContractsInitFlagDesc)
- UpdateCmd.Flags().String(commonflags.ContractsURLFlag, "", commonflags.ContractsURLFlagDesc)
- UpdateCmd.MarkFlagsMutuallyExclusive(commonflags.ContractsInitFlag, commonflags.ContractsURLFlag)
-}
-
-func init() {
- initDumpContractHashesCmd()
- initUpdateContractsCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/update.go b/cmd/frostfs-adm/internal/modules/morph/contract/update.go
deleted file mode 100644
index 109849aab..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/contract/update.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package contract
-
-import (
- "encoding/hex"
- "errors"
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/common"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- io2 "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
- neoUtil "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var errMissingNNSRecord = errors.New("missing NNS record")
-
-func updateContracts(cmd *cobra.Command, _ []string) error {
- wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
- if err != nil {
- return fmt.Errorf("initialization error: %w", err)
- }
-
- if err := helper.DeployNNS(wCtx, constants.UpdateMethodName); err != nil {
- return err
- }
-
- return updateContractsInternal(wCtx)
-}
-
-func updateContractsInternal(c *helper.InitializeContext) error {
- alphaCs := c.GetContract(constants.AlphabetContract)
-
- nnsCs, err := c.NNSContractState()
- if err != nil {
- return err
- }
- nnsHash := nnsCs.Hash
-
- w := io2.NewBufBinWriter()
-
- // Update script size for a single-node committee is close to the maximum allowed size of 65535.
- // Because of this we want to reuse alphabet contract NEF and manifest for different updates.
- // The generated script is as following.
- // 1. Initialize static slot for alphabet NEF.
- // 2. Store NEF into the static slot.
- // 3. Push parameters for each alphabet contract on stack.
- // 4. Add contract group to the manifest.
- // 5. For each alphabet contract, invoke `update` using parameters on stack and
- // NEF from step 2 and manifest from step 4.
- emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
- emit.Bytes(w.BinWriter, alphaCs.RawNEF)
- emit.Opcodes(w.BinWriter, opcode.STSFLD0)
-
- keysParam, err := deployAlphabetAccounts(c, nnsHash, w, alphaCs)
- if err != nil {
- return err
- }
-
- w.Reset()
-
- if err = deployOrUpdateContracts(c, w, nnsHash, keysParam); err != nil {
- return err
- }
-
- groupKey := c.ContractWallet.Accounts[0].PrivateKey().PublicKey()
- _, _, err = c.EmitUpdateNNSGroupScript(w, nnsHash, groupKey)
- if err != nil {
- return err
- }
- c.Command.Printf("NNS: Set %s -> %s\n", morphClient.NNSGroupKeyName, hex.EncodeToString(groupKey.Bytes()))
-
- emit.Opcodes(w.BinWriter, opcode.LDSFLD0)
- emit.Int(w.BinWriter, 1)
- emit.Opcodes(w.BinWriter, opcode.PACK)
- emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
-
- if err := c.SendCommitteeTx(w.Bytes(), false); err != nil {
- return err
- }
- return c.AwaitTx()
-}
-
-func deployAlphabetAccounts(c *helper.InitializeContext, nnsHash neoUtil.Uint160, w *io2.BufBinWriter, alphaCs *helper.ContractState) ([]any, error) {
- var keysParam []any
-
- baseGroups := alphaCs.Manifest.Groups
-
- // alphabet contracts should be deployed by individual nodes to get different hashes.
- for i, acc := range c.Accounts {
- ctrHash, err := helper.NNSResolveHash(c.ReadOnlyInvoker, nnsHash, helper.GetAlphabetNNSDomain(i))
- if err != nil {
- return nil, fmt.Errorf("can't resolve hash for contract update: %w", err)
- }
-
- keysParam = append(keysParam, acc.PrivateKey().PublicKey().Bytes())
-
- params := c.GetAlphabetDeployItems(i, len(c.Wallets))
- emit.Array(w.BinWriter, params...)
-
- alphaCs.Manifest.Groups = baseGroups
- err = helper.AddManifestGroup(c.ContractWallet, ctrHash, alphaCs)
- if err != nil {
- return nil, fmt.Errorf("can't sign manifest group: %v", err)
- }
-
- emit.Bytes(w.BinWriter, alphaCs.RawManifest)
- emit.Opcodes(w.BinWriter, opcode.LDSFLD0)
- emit.Int(w.BinWriter, 3)
- emit.Opcodes(w.BinWriter, opcode.PACK)
- emit.AppCallNoArgs(w.BinWriter, ctrHash, constants.UpdateMethodName, callflag.All)
- }
- if err := c.SendCommitteeTx(w.Bytes(), false); err != nil {
- if !strings.Contains(err.Error(), common.ErrAlreadyUpdated) {
- return nil, err
- }
- c.Command.Println("Alphabet contracts are already updated.")
- }
-
- return keysParam, nil
-}
-
-func deployOrUpdateContracts(c *helper.InitializeContext, w *io2.BufBinWriter, nnsHash neoUtil.Uint160, keysParam []any) error {
- emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
- emit.AppCall(w.BinWriter, nnsHash, "getPrice", callflag.All)
- emit.Opcodes(w.BinWriter, opcode.STSFLD0)
- emit.AppCall(w.BinWriter, nnsHash, "setPrice", callflag.All, 1)
-
- for _, ctrName := range constants.ContractList {
- cs := c.GetContract(ctrName)
-
- method := constants.UpdateMethodName
- ctrHash, err := helper.NNSResolveHash(c.ReadOnlyInvoker, nnsHash, helper.DomainOf(ctrName))
- if err != nil {
- if errors.Is(err, errMissingNNSRecord) {
- // if contract not found we deploy it instead of update
- method = constants.DeployMethodName
- } else {
- return fmt.Errorf("can't resolve hash for contract update: %w", err)
- }
- }
-
- err = helper.AddManifestGroup(c.ContractWallet, ctrHash, cs)
- if err != nil {
- return fmt.Errorf("can't sign manifest group: %v", err)
- }
-
- invokeHash := management.Hash
- if method == constants.UpdateMethodName {
- invokeHash = ctrHash
- }
-
- args, err := helper.GetContractDeployData(c, ctrName, keysParam, constants.UpdateMethodName)
- if err != nil {
- return fmt.Errorf("%s: getting update params: %v", ctrName, err)
- }
- params := helper.GetContractDeployParameters(cs, args)
- res, err := c.CommitteeAct.MakeCall(invokeHash, method, params...)
- if err != nil {
- if method != constants.UpdateMethodName || !strings.Contains(err.Error(), common.ErrAlreadyUpdated) {
- return fmt.Errorf("deploy contract: %w", err)
- }
- c.Command.Printf("%s contract is already updated.\n", ctrName)
- continue
- }
-
- w.WriteBytes(res.Script)
-
- if method == constants.DeployMethodName {
- // same actions are done in InitializeContext.setNNS, can be unified
- domain := ctrName + ".frostfs"
- script, ok, err := c.NNSRegisterDomainScript(nnsHash, cs.Hash, domain)
- if err != nil {
- return err
- }
- if !ok {
- w.WriteBytes(script)
- emit.AppCall(w.BinWriter, nnsHash, "deleteRecords", callflag.All, domain, int64(nns.TXT))
- emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All,
- domain, int64(nns.TXT), cs.Hash.StringLE())
- emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All,
- domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
- }
- c.Command.Printf("NNS: Set %s -> %s\n", domain, cs.Hash.StringLE())
- }
- }
- return nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/deploy.go
similarity index 71%
rename from cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
rename to cmd/frostfs-adm/internal/modules/morph/deploy.go
index 543b5fcb3..a4b945438 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/deploy.go
@@ -1,4 +1,4 @@
-package contract
+package morph
import (
"encoding/json"
@@ -7,10 +7,6 @@ import (
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/cli/cmdargs"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
@@ -27,9 +23,10 @@ import (
const (
contractPathFlag = "contract"
updateFlag = "update"
+ customZoneFlag = "domain"
)
-var DeployCmd = &cobra.Command{
+var deployCmd = &cobra.Command{
Use: "deploy",
Short: "Deploy additional smart-contracts",
Long: `Deploy additional smart-contract which are not related to core.
@@ -40,33 +37,33 @@ Compiled contract file name must contain '_contract.nef' suffix.
Contract's manifest file name must be 'config.json'.
NNS name is taken by stripping '_contract.nef' from the NEF file (similar to frostfs contracts).`,
PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
},
RunE: deployContractCmd,
}
func init() {
- ff := DeployCmd.Flags()
+ ff := deployCmd.Flags()
- ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- _ = DeployCmd.MarkFlagFilename(commonflags.AlphabetWalletsFlag)
+ ff.String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ _ = deployCmd.MarkFlagFilename(alphabetWalletsFlag)
- ff.StringP(commonflags.EndpointFlag, "r", "", commonflags.EndpointFlagDesc)
+ ff.StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
ff.String(contractPathFlag, "", "Path to the contract directory")
- _ = DeployCmd.MarkFlagFilename(contractPathFlag)
+ _ = deployCmd.MarkFlagFilename(contractPathFlag)
ff.Bool(updateFlag, false, "Update an existing contract")
- ff.String(commonflags.CustomZoneFlag, "frostfs", "Custom zone for NNS")
+ ff.String(customZoneFlag, "frostfs", "Custom zone for NNS")
}
func deployContractCmd(cmd *cobra.Command, args []string) error {
v := viper.GetViper()
- c, err := helper.NewInitializeContext(cmd, v)
+ c, err := newInitializeContext(cmd, v)
if err != nil {
return fmt.Errorf("initialization error: %w", err)
}
- defer c.Close()
+ defer c.close()
ctrPath, _ := cmd.Flags().GetString(contractPathFlag)
ctrName, err := probeContractName(ctrPath)
@@ -74,29 +71,28 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
return err
}
- cs, err := helper.ReadContract(ctrPath, ctrName)
+ cs, err := readContract(ctrPath, ctrName)
if err != nil {
return err
}
- r := management.NewReader(c.ReadOnlyInvoker)
- nnsCs, err := helper.GetContractByID(r, 1)
+ nnsCs, err := c.Client.GetContractStateByID(1)
if err != nil {
return fmt.Errorf("can't fetch NNS contract state: %w", err)
}
callHash := management.Hash
- method := constants.DeployMethodName
- zone, _ := cmd.Flags().GetString(commonflags.CustomZoneFlag)
+ method := deployMethodName
+ zone, _ := cmd.Flags().GetString(customZoneFlag)
domain := ctrName + "." + zone
isUpdate, _ := cmd.Flags().GetBool(updateFlag)
if isUpdate {
- cs.Hash, err = helper.NNSResolveHash(c.ReadOnlyInvoker, nnsCs.Hash, domain)
+ cs.Hash, err = nnsResolveHash(c.ReadOnlyInvoker, nnsCs.Hash, domain)
if err != nil {
return fmt.Errorf("can't fetch contract hash from NNS: %w", err)
}
callHash = cs.Hash
- method = constants.UpdateMethodName
+ method = updateMethodName
} else {
cs.Hash = state.CreateContractHash(
c.CommitteeAcc.Contract.ScriptHash(),
@@ -121,15 +117,17 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
}
}
- assert.NoError(writer.Err, "can't create deployment script")
+ if writer.Err != nil {
+ panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
+ }
- if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil {
+ if err := c.sendCommitteeTx(writer.Bytes(), false); err != nil {
return err
}
- return c.AwaitTx()
+ return c.awaitTx()
}
-func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string, domain string, cs *helper.ContractState, writer *io.BufBinWriter) error {
+func registerNNS(nnsCs *state.Contract, c *initializeContext, zone string, domain string, cs *contractState, writer *io.BufBinWriter) error {
bw := io.NewBufBinWriter()
emit.Instruction(bw.BinWriter, opcode.INITSSLOT, []byte{1})
emit.AppCall(bw.BinWriter, nnsCs.Hash, "getPrice", callflag.All)
@@ -139,7 +137,7 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
start := bw.Len()
needRecord := false
- ok, err := c.NNSRootRegistered(nnsCs.Hash, zone)
+ ok, err := c.nnsRootRegistered(nnsCs.Hash, zone)
if err != nil {
return err
} else if !ok {
@@ -147,17 +145,15 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
zone, c.CommitteeAcc.Contract.ScriptHash(),
- constants.FrostfsOpsEmail, constants.NNSRefreshDefVal, constants.NNSRetryDefVal,
- int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
+ frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
domain, c.CommitteeAcc.Contract.ScriptHash(),
- constants.FrostfsOpsEmail, constants.NNSRefreshDefVal, constants.NNSRetryDefVal,
- int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
+ frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
} else {
- s, ok, err := c.NNSRegisterDomainScript(nnsCs.Hash, cs.Hash, domain)
+ s, ok, err := c.nnsRegisterDomainScript(nnsCs.Hash, cs.Hash, domain)
if err != nil {
return err
}
@@ -172,8 +168,9 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
}
- assert.NoError(bw.Err, "can't create deployment script")
- if bw.Len() != start {
+ if bw.Err != nil {
+ panic(fmt.Errorf("BUG: can't create deployment script: %w", bw.Err))
+ } else if bw.Len() != start {
writer.WriteBytes(bw.Bytes())
emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All)
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/dump_hashes.go
similarity index 71%
rename from cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
rename to cmd/frostfs-adm/internal/modules/morph/dump_hashes.go
index fde58fd2b..69db5c7bd 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
+++ b/cmd/frostfs-adm/internal/modules/morph/dump_hashes.go
@@ -1,4 +1,4 @@
-package contract
+package morph
import (
"bytes"
@@ -8,16 +8,10 @@ import (
"text/tabwriter"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
@@ -37,27 +31,26 @@ type contractDumpInfo struct {
}
func dumpContractHashes(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := getN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
- r := management.NewReader(invoker.New(c, nil))
- cs, err := helper.GetContractByID(r, 1)
+ cs, err := c.GetContractStateByID(1)
if err != nil {
return err
}
- zone, _ := cmd.Flags().GetString(commonflags.CustomZoneFlag)
+ zone, _ := cmd.Flags().GetString(customZoneFlag)
if zone != "" {
return dumpCustomZoneHashes(cmd, cs.Hash, zone, c)
}
- infos := []contractDumpInfo{{name: constants.NNSContract, hash: cs.Hash}}
+ infos := []contractDumpInfo{{name: nnsContract, hash: cs.Hash}}
irSize := 0
for ; irSize < lastGlagoliticLetter; irSize++ {
- ok, err := helper.NNSIsAvailable(c, cs.Hash, helper.GetAlphabetNNSDomain(irSize))
+ ok, err := nnsIsAvailable(c, cs.Hash, getAlphabetNNSDomain(irSize))
if err != nil {
return err
} else if ok {
@@ -69,9 +62,9 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
if irSize != 0 {
bw.Reset()
- for i := range irSize {
+ for i := 0; i < irSize; i++ {
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
- helper.GetAlphabetNNSDomain(i),
+ getAlphabetNNSDomain(i),
int64(nns.TXT))
}
@@ -80,19 +73,19 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't fetch info from NNS: %w", err)
}
- for i := range irSize {
+ for i := 0; i < irSize; i++ {
info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)}
- if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil {
+ if h, err := parseNNSResolveResult(alphaRes.Stack[i]); err == nil {
info.hash = h
}
infos = append(infos, info)
}
}
- for _, ctrName := range constants.ContractList {
+ for _, ctrName := range contractList {
bw.Reset()
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
- helper.DomainOf(ctrName), int64(nns.TXT))
+ ctrName+".frostfs", int64(nns.TXT))
res, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {
@@ -101,7 +94,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
info := contractDumpInfo{name: ctrName}
if len(res.Stack) != 0 {
- if h, err := helper.ParseNNSResolveResult(res.Stack[0]); err == nil {
+ if h, err := parseNNSResolveResult(res.Stack[0]); err == nil {
info.hash = h
}
}
@@ -114,7 +107,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
return nil
}
-func dumpCustomZoneHashes(cmd *cobra.Command, nnsHash util.Uint160, zone string, c helper.Client) error {
+func dumpCustomZoneHashes(cmd *cobra.Command, nnsHash util.Uint160, zone string, c Client) error {
const nnsMaxTokens = 100
inv := invoker.New(c, nil)
@@ -136,7 +129,7 @@ func dumpCustomZoneHashes(cmd *cobra.Command, nnsHash util.Uint160, zone string,
return
}
- h, err := helper.NNSResolveHash(inv, nnsHash, string(bs))
+ h, err := nnsResolveHash(inv, nnsHash, string(bs))
if err != nil {
cmd.PrintErrf("Could not resolve name %s: %v\n", string(bs), err)
return
@@ -148,12 +141,7 @@ func dumpCustomZoneHashes(cmd *cobra.Command, nnsHash util.Uint160, zone string,
})
}
- script, err := smartcontract.CreateCallAndPrefetchIteratorScript(nnsHash, "tokens", nnsMaxTokens)
- if err != nil {
- return fmt.Errorf("create prefetch script: %w", err)
- }
-
- arr, sessionID, iter, err := unwrap.ArrayAndSessionIterator(inv.Run(script))
+ sessionID, iter, err := unwrap.SessionIterator(inv.Call(nnsHash, "tokens"))
if err != nil {
if errors.Is(err, unwrap.ErrNoSessionID) {
items, err := unwrap.Array(inv.CallAndExpandIterator(nnsHash, "tokens", nnsMaxTokens))
@@ -170,20 +158,16 @@ func dumpCustomZoneHashes(cmd *cobra.Command, nnsHash util.Uint160, zone string,
return err
}
} else {
- for i := range arr {
- processItem(arr[i])
- }
-
defer func() {
_ = inv.TerminateSession(sessionID)
}()
- items, err := inv.TraverseIterator(sessionID, &iter, 0)
+ items, err := inv.TraverseIterator(sessionID, &iter, nnsMaxTokens)
for err == nil && len(items) != 0 {
for i := range items {
processItem(items[i])
}
- items, err = inv.TraverseIterator(sessionID, &iter, 0)
+ items, err = inv.TraverseIterator(sessionID, &iter, nnsMaxTokens)
}
if err != nil {
return fmt.Errorf("error during NNS domains iteration: %w", err)
@@ -220,15 +204,15 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
if info.version == "" {
info.version = "unknown"
}
- _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
- info.name, info.version, info.hash.StringLE()))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
+ info.name, info.version, info.hash.StringLE())))
}
_ = tw.Flush()
cmd.Print(buf.String())
}
-func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDumpInfo) {
+func fillContractVersion(cmd *cobra.Command, c Client, infos []contractDumpInfo) {
bw := io.NewBufBinWriter()
sub := io.NewBufBinWriter()
for i := range infos {
@@ -237,17 +221,21 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
} else {
sub.Reset()
emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag)
- assert.NoError(sub.Err, "can't create version script")
+ if sub.Err != nil {
+ panic(fmt.Errorf("BUG: can't create version script: %w", sub.Err))
+ }
script := sub.Bytes()
emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
- bw.WriteBytes(script)
+ bw.BinWriter.WriteBytes(script)
emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
emit.Opcodes(bw.BinWriter, opcode.PUSH0)
}
}
emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target
- assert.NoError(bw.Err, "can't create version script")
+ if bw.Err != nil {
+ panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
+ }
res, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {
diff --git a/cmd/frostfs-adm/internal/modules/morph/epoch.go b/cmd/frostfs-adm/internal/modules/morph/epoch.go
new file mode 100644
index 000000000..a96efa43f
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/epoch.go
@@ -0,0 +1,65 @@
+package morph
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/nspcc-dev/neo-go/pkg/io"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/emit"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+func forceNewEpochCmd(cmd *cobra.Command, _ []string) error {
+ wCtx, err := newInitializeContext(cmd, viper.GetViper())
+ if err != nil {
+ return fmt.Errorf("can't initialize context: %w", err)
+ }
+
+ cs, err := wCtx.Client.GetContractStateByID(1)
+ if err != nil {
+ return fmt.Errorf("can't get NNS contract info: %w", err)
+ }
+
+ nmHash, err := nnsResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, netmapContract+".frostfs")
+ if err != nil {
+ return fmt.Errorf("can't get netmap contract hash: %w", err)
+ }
+
+ bw := io.NewBufBinWriter()
+ if err := emitNewEpochCall(bw, wCtx, nmHash); err != nil {
+ return err
+ }
+
+ if err := wCtx.sendConsensusTx(bw.Bytes()); err != nil {
+ return err
+ }
+
+ if err := wCtx.awaitTx(); err != nil {
+ if strings.Contains(err.Error(), "invalid epoch") {
+ cmd.Println("Epoch has already ticked.")
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+func emitNewEpochCall(bw *io.BufBinWriter, wCtx *initializeContext, nmHash util.Uint160) error {
+ curr, err := unwrap.Int64(wCtx.ReadOnlyInvoker.Call(nmHash, "epoch"))
+ if err != nil {
+ return errors.New("can't fetch current epoch from the netmap contract")
+ }
+
+ newEpoch := curr + 1
+ wCtx.Command.Printf("Current epoch: %d, increase to %d.\n", curr, newEpoch)
+
+ // In NeoFS this is done via Notary contract. Here, however, we can form the
+ // transaction locally.
+ emit.AppCall(bw.BinWriter, nmHash, "newEpoch", callflag.All, newEpoch)
+ return bw.Err
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go
deleted file mode 100644
index 4046e85e3..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package frostfsid
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var (
- frostfsidAddSubjectKeyCmd = &cobra.Command{
- Use: "add-subject-key",
- Short: "Add a public key to the subject in frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidAddSubjectKey,
- }
- frostfsidRemoveSubjectKeyCmd = &cobra.Command{
- Use: "remove-subject-key",
- Short: "Remove a public key from the subject in frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidRemoveSubjectKey,
- }
-)
-
-func initFrostfsIDAddSubjectKeyCmd() {
- Cmd.AddCommand(frostfsidAddSubjectKeyCmd)
-
- ff := frostfsidAddSubjectKeyCmd.Flags()
- ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
- ff.String(subjectAddressFlag, "", "Subject address")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
-
- ff.String(subjectKeyFlag, "", "Public key to add")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
-}
-
-func initFrostfsIDRemoveSubjectKeyCmd() {
- Cmd.AddCommand(frostfsidRemoveSubjectKeyCmd)
-
- ff := frostfsidRemoveSubjectKeyCmd.Flags()
- ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
- ff.String(subjectAddressFlag, "", "Subject address")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
-
- ff.String(subjectKeyFlag, "", "Public key to remove")
- _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
-}
-
-func frostfsidAddSubjectKey(cmd *cobra.Command, _ []string) {
- addr := getFrostfsIDSubjectAddress(cmd)
- pub := getFrostfsIDSubjectKey(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.AddSubjectKeyCall(addr, pub))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "add subject key: %w", err)
-}
-
-func frostfsidRemoveSubjectKey(cmd *cobra.Command, _ []string) {
- addr := getFrostfsIDSubjectAddress(cmd)
- pub := getFrostfsIDSubjectKey(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.RemoveSubjectKeyCall(addr, pub))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "remove subject key: %w", err)
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
deleted file mode 100644
index 7f777db98..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go
+++ /dev/null
@@ -1,629 +0,0 @@
-package frostfsid
-
-import (
- "encoding/hex"
- "errors"
- "fmt"
- "math/big"
- "sort"
-
- frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
- frostfsidrpclient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfsid"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/google/uuid"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const iteratorBatchSize = 1
-
-const (
- namespaceFlag = "namespace"
- subjectNameFlag = "subject-name"
- subjectKeyFlag = "subject-key"
- subjectAddressFlag = "subject-address"
- extendedFlag = "extended"
- groupNameFlag = "group-name"
- groupIDFlag = "group-id"
-
- rootNamespacePlaceholder = ""
-
- keyFlag = "key"
- keyDescFlag = "Key for storing a value in the subject's KV storage"
- valueFlag = "value"
- valueDescFlag = "Value to be stored in the subject's KV storage"
-)
-
-var (
- Cmd = &cobra.Command{
- Use: "frostfsid",
- Short: "Section for frostfsid interactions commands",
- }
-
- frostfsidCreateNamespaceCmd = &cobra.Command{
- Use: "create-namespace",
- Short: "Create new namespace in frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidCreateNamespace,
- }
-
- frostfsidListNamespacesCmd = &cobra.Command{
- Use: "list-namespaces",
- Short: "List all namespaces in frostfsid",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidListNamespaces,
- }
-
- frostfsidCreateSubjectCmd = &cobra.Command{
- Use: "create-subject",
- Short: "Create subject in frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidCreateSubject,
- }
-
- frostfsidDeleteSubjectCmd = &cobra.Command{
- Use: "delete-subject",
- Short: "Delete subject from frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidDeleteSubject,
- }
-
- frostfsidListSubjectsCmd = &cobra.Command{
- Use: "list-subjects",
- Short: "List subjects in namespace",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidListSubjects,
- }
-
- frostfsidCreateGroupCmd = &cobra.Command{
- Use: "create-group",
- Short: "Create group in frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidCreateGroup,
- }
-
- frostfsidDeleteGroupCmd = &cobra.Command{
- Use: "delete-group",
- Short: "Delete group from frostfsid contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidDeleteGroup,
- }
-
- frostfsidListGroupsCmd = &cobra.Command{
- Use: "list-groups",
- Short: "List groups in namespace",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidListGroups,
- }
-
- frostfsidAddSubjectToGroupCmd = &cobra.Command{
- Use: "add-subject-to-group",
- Short: "Add subject to group",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidAddSubjectToGroup,
- }
-
- frostfsidRemoveSubjectFromGroupCmd = &cobra.Command{
- Use: "remove-subject-from-group",
- Short: "Remove subject from group",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidRemoveSubjectFromGroup,
- }
-
- frostfsidListGroupSubjectsCmd = &cobra.Command{
- Use: "list-group-subjects",
- Short: "List subjects in group",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidListGroupSubjects,
- }
-
- frostfsidSetKVCmd = &cobra.Command{
- Use: "set-kv",
- Short: "Store a key-value pair in the subject's KV storage",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidSetKV,
- }
- frostfsidDeleteKVCmd = &cobra.Command{
- Use: "delete-kv",
- Short: "Delete a value from the subject's KV storage",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: frostfsidDeleteKV,
- }
-)
-
-func initFrostfsIDCreateNamespaceCmd() {
- Cmd.AddCommand(frostfsidCreateNamespaceCmd)
- frostfsidCreateNamespaceCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidCreateNamespaceCmd.Flags().String(namespaceFlag, "", "Namespace name to create")
- frostfsidCreateNamespaceCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- _ = frostfsidCreateNamespaceCmd.MarkFlagRequired(namespaceFlag)
-}
-
-func initFrostfsIDListNamespacesCmd() {
- Cmd.AddCommand(frostfsidListNamespacesCmd)
- frostfsidListNamespacesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-}
-
-func initFrostfsIDCreateSubjectCmd() {
- Cmd.AddCommand(frostfsidCreateSubjectCmd)
- frostfsidCreateSubjectCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidCreateSubjectCmd.Flags().String(namespaceFlag, "", "Namespace where create subject")
- frostfsidCreateSubjectCmd.Flags().String(subjectNameFlag, "", "Subject name, must be unique in namespace")
- frostfsidCreateSubjectCmd.Flags().String(subjectKeyFlag, "", "Subject hex-encoded public key")
- frostfsidCreateSubjectCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-}
-
-func initFrostfsIDDeleteSubjectCmd() {
- Cmd.AddCommand(frostfsidDeleteSubjectCmd)
- frostfsidDeleteSubjectCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidDeleteSubjectCmd.Flags().String(subjectAddressFlag, "", "Subject address")
- frostfsidDeleteSubjectCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-}
-
-func initFrostfsIDListSubjectsCmd() {
- Cmd.AddCommand(frostfsidListSubjectsCmd)
- frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
- frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
-}
-
-func initFrostfsIDCreateGroupCmd() {
- Cmd.AddCommand(frostfsidCreateGroupCmd)
- frostfsidCreateGroupCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidCreateGroupCmd.Flags().String(namespaceFlag, "", "Namespace where create group")
- frostfsidCreateGroupCmd.Flags().String(groupNameFlag, "", "Group name, must be unique in namespace")
- frostfsidCreateGroupCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- _ = frostfsidCreateGroupCmd.MarkFlagRequired(groupNameFlag)
-}
-
-func initFrostfsIDDeleteGroupCmd() {
- Cmd.AddCommand(frostfsidDeleteGroupCmd)
- frostfsidDeleteGroupCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidDeleteGroupCmd.Flags().String(namespaceFlag, "", "Namespace to delete group")
- frostfsidDeleteGroupCmd.Flags().Int64(groupIDFlag, 0, "Group id")
- frostfsidDeleteGroupCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-}
-
-func initFrostfsIDListGroupsCmd() {
- Cmd.AddCommand(frostfsidListGroupsCmd)
- frostfsidListGroupsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidListGroupsCmd.Flags().String(namespaceFlag, "", "Namespace to list groups")
-}
-
-func initFrostfsIDAddSubjectToGroupCmd() {
- Cmd.AddCommand(frostfsidAddSubjectToGroupCmd)
- frostfsidAddSubjectToGroupCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidAddSubjectToGroupCmd.Flags().String(subjectAddressFlag, "", "Subject address")
- frostfsidAddSubjectToGroupCmd.Flags().Int64(groupIDFlag, 0, "Group id")
- frostfsidAddSubjectToGroupCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-}
-
-func initFrostfsIDRemoveSubjectFromGroupCmd() {
- Cmd.AddCommand(frostfsidRemoveSubjectFromGroupCmd)
- frostfsidRemoveSubjectFromGroupCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidRemoveSubjectFromGroupCmd.Flags().String(subjectAddressFlag, "", "Subject address")
- frostfsidRemoveSubjectFromGroupCmd.Flags().Int64(groupIDFlag, 0, "Group id")
- frostfsidRemoveSubjectFromGroupCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-}
-
-func initFrostfsIDListGroupSubjectsCmd() {
- Cmd.AddCommand(frostfsidListGroupSubjectsCmd)
- frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
- frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
- frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
-}
-
-func initFrostfsIDSetKVCmd() {
- Cmd.AddCommand(frostfsidSetKVCmd)
- frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
- frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag)
- frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag)
-}
-
-func initFrostfsIDDeleteKVCmd() {
- Cmd.AddCommand(frostfsidDeleteKVCmd)
- frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
- frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag)
-}
-
-func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {
- ns := getFrostfsIDNamespace(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.CreateNamespaceCall(ns))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "create namespace error: %w", err)
-}
-
-func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
- inv, _, hash := initInvoker(cmd)
- reader := frostfsidrpclient.NewReader(inv, hash)
- sessionID, it, err := reader.ListNamespaces()
- commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
- items, err := readIterator(inv, &it, sessionID)
- commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
-
- namespaces, err := frostfsidclient.ParseNamespaces(items)
- commonCmd.ExitOnErr(cmd, "can't parse namespace: %w", err)
- sort.Slice(namespaces, func(i, j int) bool { return namespaces[i].Name < namespaces[j].Name })
-
- for _, namespace := range namespaces {
- if namespace.Name == "" {
- namespace.Name = rootNamespacePlaceholder
- }
- cmd.Printf("%s\n", namespace.Name)
- }
-}
-
-func frostfsidCreateSubject(cmd *cobra.Command, _ []string) {
- ns := getFrostfsIDNamespace(cmd)
- subjName := getFrostfsIDSubjectName(cmd)
- subjKey := getFrostfsIDSubjectKey(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.CreateSubjectCall(ns, subjKey))
- if subjName != "" {
- ffsid.addCall(ffsid.roCli.SetSubjectNameCall(subjKey.GetScriptHash(), subjName))
- }
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "create subject: %w", err)
-}
-
-func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
- subjectAddress := getFrostfsIDSubjectAddress(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.DeleteSubjectCall(subjectAddress))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "delete subject error: %w", err)
-}
-
-func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
- extended, _ := cmd.Flags().GetBool(extendedFlag)
- ns := getFrostfsIDNamespace(cmd)
- inv, _, hash := initInvoker(cmd)
- reader := frostfsidrpclient.NewReader(inv, hash)
- sessionID, it, err := reader.ListNamespaceSubjects(ns)
- commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-
- subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
- commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
-
- sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
-
- for _, addr := range subAddresses {
- if !extended {
- cmd.Println(address.Uint160ToString(addr))
- continue
- }
-
- items, err := reader.GetSubject(addr)
- commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
-
- subj, err := frostfsidclient.ParseSubject(items)
- commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
-
- printSubjectInfo(cmd, addr, subj)
- cmd.Println()
- }
-}
-
-func frostfsidCreateGroup(cmd *cobra.Command, _ []string) {
- ns := getFrostfsIDNamespace(cmd)
- groupName := getFrostfsIDGroupName(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.CreateGroupCall(ns, groupName))
-
- groupID, err := ffsid.roCli.ParseGroupID(ffsid.sendWaitRes())
- commonCmd.ExitOnErr(cmd, "create group: %w", err)
-
- cmd.Printf("group '%s' created with id: %d\n", groupName, groupID)
-}
-
-func frostfsidDeleteGroup(cmd *cobra.Command, _ []string) {
- ns := getFrostfsIDNamespace(cmd)
- groupID := getFrostfsIDGroupID(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.DeleteGroupCall(ns, groupID))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "delete group error: %w", err)
-}
-
-func frostfsidListGroups(cmd *cobra.Command, _ []string) {
- inv, _, hash := initInvoker(cmd)
- ns := getFrostfsIDNamespace(cmd)
-
- reader := frostfsidrpclient.NewReader(inv, hash)
- sessionID, it, err := reader.ListGroups(ns)
- commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-
- items, err := readIterator(inv, &it, sessionID)
- commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
- groups, err := frostfsidclient.ParseGroups(items)
- commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
-
- sort.Slice(groups, func(i, j int) bool { return groups[i].Name < groups[j].Name })
-
- for _, group := range groups {
- cmd.Printf("%s (%d)\n", group.Name, group.ID)
- }
-}
-
-func frostfsidAddSubjectToGroup(cmd *cobra.Command, _ []string) {
- subjectAddress := getFrostfsIDSubjectAddress(cmd)
- groupID := getFrostfsIDGroupID(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.AddSubjectToGroupCall(subjectAddress, groupID))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "add subject to group error: %w", err)
-}
-
-func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) {
- subjectAddress := getFrostfsIDSubjectAddress(cmd)
- groupID := getFrostfsIDGroupID(cmd)
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- ffsid.addCall(ffsid.roCli.RemoveSubjectFromGroupCall(subjectAddress, groupID))
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err)
-}
-
-func frostfsidSetKV(cmd *cobra.Command, _ []string) {
- subjectAddress := getFrostfsIDSubjectAddress(cmd)
- key, _ := cmd.Flags().GetString(keyFlag)
- value, _ := cmd.Flags().GetString(valueFlag)
-
- if key == "" {
- commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
- }
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value)
-
- ffsid.addCall(method, args)
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "set KV: %w", err)
-}
-
-func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
- subjectAddress := getFrostfsIDSubjectAddress(cmd)
- key, _ := cmd.Flags().GetString(keyFlag)
-
- if key == "" {
- commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
- }
-
- ffsid, err := newFrostfsIDClient(cmd)
- commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
- method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key)
-
- ffsid.addCall(method, args)
-
- err = ffsid.sendWait()
- commonCmd.ExitOnErr(cmd, "delete KV: %w", err)
-}
-
-func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
- ns := getFrostfsIDNamespace(cmd)
- groupID := getFrostfsIDGroupID(cmd)
- extended, _ := cmd.Flags().GetBool(extendedFlag)
- inv, cs, hash := initInvoker(cmd)
- _, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
- commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
-
- reader := frostfsidrpclient.NewReader(inv, hash)
- sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
- commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
-
- items, err := readIterator(inv, &it, sessionID)
- commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
-
- subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
- commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
-
- sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) })
-
- for _, subjAddr := range subjects {
- if !extended {
- cmd.Println(address.Uint160ToString(subjAddr))
- continue
- }
-
- items, err := reader.GetSubject(subjAddr)
- commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
- subj, err := frostfsidclient.ParseSubject(items)
- commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
- printSubjectInfo(cmd, subjAddr, subj)
- cmd.Println()
- }
-}
-
-type frostfsidClient struct {
- bw *io.BufBinWriter
- contractHash util.Uint160
- roCli *frostfsidclient.Client // client can be used only for waiting tx, parsing and forming method params
- wCtx *helper.InitializeContext
-}
-
-func newFrostfsIDClient(cmd *cobra.Command) (*frostfsidClient, error) {
- wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
- if err != nil {
- return nil, fmt.Errorf("can't initialize context: %w", err)
- }
-
- r := management.NewReader(wCtx.ReadOnlyInvoker)
- cs, err := helper.GetContractByID(r, 1)
- if err != nil {
- return nil, fmt.Errorf("can't get NNS contract info: %w", err)
- }
-
- ffsidHash, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
- if err != nil {
- return nil, fmt.Errorf("can't get proxy contract hash: %w", err)
- }
-
- return &frostfsidClient{
- bw: io.NewBufBinWriter(),
- contractHash: ffsidHash,
- roCli: frostfsidclient.NewSimple(wCtx.CommitteeAct, ffsidHash),
- wCtx: wCtx,
- }, nil
-}
-
-func (f *frostfsidClient) addCall(method string, args []any) {
- emit.AppCall(f.bw.BinWriter, f.contractHash, method, callflag.All, args...)
-}
-
-func (f *frostfsidClient) sendWait() error {
- if err := f.wCtx.SendConsensusTx(f.bw.Bytes()); err != nil {
- return err
- }
- f.bw.Reset()
-
- return f.wCtx.AwaitTx()
-}
-
-func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
- if err := f.wCtx.SendConsensusTx(f.bw.Bytes()); err != nil {
- return nil, err
- }
- f.bw.Reset()
-
- f.wCtx.Command.Println("Waiting for transactions to persist...")
- return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
-}
-
-func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
- var shouldStop bool
- res := make([]stackitem.Item, 0)
- for !shouldStop {
- items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
- if err != nil {
- return nil, err
- }
-
- res = append(res, items...)
- shouldStop = len(items) < iteratorBatchSize
- }
-
- return res, nil
-}
-
-func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {
- c, err := helper.NewRemoteClient(viper.GetViper())
- commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
-
- inv := invoker.New(c, nil)
- r := management.NewReader(inv)
-
- cs, err := r.GetContractByID(1)
- commonCmd.ExitOnErr(cmd, "can't get NNS contract info: %w", err)
-
- nmHash, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
- commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
-
- return inv, cs, nmHash
-}
-
-func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) {
- cmd.Printf("Address: %s\n", address.Uint160ToString(addr))
- pk := ""
- if subj.PrimaryKey != nil {
- pk = hex.EncodeToString(subj.PrimaryKey.Bytes())
- }
- cmd.Printf("Primary key: %s\n", pk)
- cmd.Printf("Name: %s\n", subj.Name)
- cmd.Printf("Namespace: %s\n", subj.Namespace)
- if len(subj.AdditionalKeys) > 0 {
- cmd.Printf("Additional keys:\n")
- for _, key := range subj.AdditionalKeys {
- k := ""
- if key != nil {
- k = hex.EncodeToString(key.Bytes())
- }
- cmd.Printf("- %s\n", k)
- }
- }
- if len(subj.KV) > 0 {
- cmd.Printf("KV:\n")
- for k, v := range subj.KV {
- cmd.Printf("- %s: %s\n", k, v)
- }
- }
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util.go
deleted file mode 100644
index 541a459c1..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package frostfsid
-
-import (
- "errors"
- "fmt"
-
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/spf13/cobra"
-)
-
-func getFrostfsIDSubjectKey(cmd *cobra.Command) *keys.PublicKey {
- subjKeyHex, _ := cmd.Flags().GetString(subjectKeyFlag)
- subjKey, err := keys.NewPublicKeyFromString(subjKeyHex)
- commonCmd.ExitOnErr(cmd, "invalid subject key: %w", err)
- return subjKey
-}
-
-func getFrostfsIDSubjectAddress(cmd *cobra.Command) util.Uint160 {
- subjAddress, _ := cmd.Flags().GetString(subjectAddressFlag)
- subjAddr, err := address.StringToUint160(subjAddress)
- commonCmd.ExitOnErr(cmd, "invalid subject address: %w", err)
- return subjAddr
-}
-
-func getFrostfsIDSubjectName(cmd *cobra.Command) string {
- subjectName, _ := cmd.Flags().GetString(subjectNameFlag)
-
- if subjectName == "" {
- return ""
- }
-
- if !ape.SubjectNameRegexp.MatchString(subjectName) {
- commonCmd.ExitOnErr(cmd, "invalid subject name: %w",
- fmt.Errorf("name must match regexp: %s", ape.SubjectNameRegexp.String()))
- }
-
- return subjectName
-}
-
-func getFrostfsIDGroupName(cmd *cobra.Command) string {
- groupName, _ := cmd.Flags().GetString(groupNameFlag)
-
- if !ape.GroupNameRegexp.MatchString(groupName) {
- commonCmd.ExitOnErr(cmd, "invalid group name: %w",
- fmt.Errorf("name must match regexp: %s", ape.GroupNameRegexp.String()))
- }
-
- return groupName
-}
-
-func getFrostfsIDGroupID(cmd *cobra.Command) int64 {
- groupID, _ := cmd.Flags().GetInt64(groupIDFlag)
- if groupID <= 0 {
- commonCmd.ExitOnErr(cmd, "invalid group id: %w",
- errors.New("group id must be positive integer"))
- }
-
- return groupID
-}
-
-func getFrostfsIDNamespace(cmd *cobra.Command) string {
- ns, _ := cmd.Flags().GetString(namespaceFlag)
- if ns == rootNamespacePlaceholder {
- ns = ""
- }
-
- if !ape.NamespaceNameRegexp.MatchString(ns) {
- commonCmd.ExitOnErr(cmd, "invalid namespace: %w",
- fmt.Errorf("name must match regexp: %s", ape.NamespaceNameRegexp.String()))
- }
-
- return ns
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go
deleted file mode 100644
index 1d0bc8441..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package frostfsid
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
- "github.com/stretchr/testify/require"
-)
-
-func TestNamespaceRegexp(t *testing.T) {
- for _, tc := range []struct {
- name string
- namespace string
- matched bool
- }{
- {
- name: "root empty ns",
- namespace: "",
- matched: true,
- },
- {
- name: "simple valid ns",
- namespace: "my-namespace-123",
- matched: true,
- },
- {
- name: "root placeholder",
- namespace: "",
- matched: false,
- },
- {
- name: "too long",
- namespace: "abcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyz",
- matched: false,
- },
- {
- name: "start with hyphen",
- namespace: "-ns",
- matched: false,
- },
- {
- name: "end with hyphen",
- namespace: "ns-",
- matched: false,
- },
- {
- name: "with spaces",
- namespace: "ns ns",
- matched: false,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- require.Equal(t, tc.matched, ape.NamespaceNameRegexp.MatchString(tc.namespace))
- })
- }
-}
-
-func TestSubjectNameRegexp(t *testing.T) {
- for _, tc := range []struct {
- name string
- subject string
- matched bool
- }{
- {
- name: "empty",
- subject: "",
- matched: false,
- },
- {
- name: "invalid",
- subject: "invalid{name}",
- matched: false,
- },
- {
- name: "too long",
- subject: "abcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyz",
- matched: false,
- },
- {
- name: "valid",
- subject: "valid_name.012345@6789",
- matched: true,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- require.Equal(t, tc.matched, ape.SubjectNameRegexp.MatchString(tc.subject))
- })
- }
-}
-
-func TestSubjectGroupRegexp(t *testing.T) {
- for _, tc := range []struct {
- name string
- subject string
- matched bool
- }{
- {
- name: "empty",
- subject: "",
- matched: false,
- },
- {
- name: "invalid",
- subject: "invalid{name}",
- matched: false,
- },
- {
- name: "too long",
- subject: "abcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyz",
- matched: false,
- },
- {
- name: "long",
- subject: "abcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyzabcdefghijklmnopkrstuvwxyz",
- matched: true,
- },
- {
- name: "valid",
- subject: "valid_name.012345@6789",
- matched: true,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- require.Equal(t, tc.matched, ape.GroupNameRegexp.MatchString(tc.subject))
- })
- }
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go
deleted file mode 100644
index 8aad5c5c1..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package frostfsid
-
-func init() {
- initFrostfsIDCreateNamespaceCmd()
- initFrostfsIDListNamespacesCmd()
- initFrostfsIDCreateSubjectCmd()
- initFrostfsIDDeleteSubjectCmd()
- initFrostfsIDListSubjectsCmd()
- initFrostfsIDCreateGroupCmd()
- initFrostfsIDDeleteGroupCmd()
- initFrostfsIDListGroupsCmd()
- initFrostfsIDAddSubjectToGroupCmd()
- initFrostfsIDRemoveSubjectFromGroupCmd()
- initFrostfsIDListGroupSubjectsCmd()
- initFrostfsIDSetKVCmd()
- initFrostfsIDDeleteKVCmd()
- initFrostfsIDAddSubjectKeyCmd()
- initFrostfsIDRemoveSubjectKeyCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate.go
new file mode 100644
index 000000000..55321582b
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/generate.go
@@ -0,0 +1,232 @@
+package morph
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/io"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/emit"
+ "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ singleAccountName = "single"
+ committeeAccountName = "committee"
+ consensusAccountName = "consensus"
+)
+
+func generateAlphabetCreds(cmd *cobra.Command, _ []string) error {
+ // alphabet size is not part of the config
+ size, err := cmd.Flags().GetUint(alphabetSizeFlag)
+ if err != nil {
+ return err
+ }
+ if size == 0 {
+ return errors.New("size must be > 0")
+ }
+ if size > maxAlphabetNodes {
+ return ErrTooManyAlphabetNodes
+ }
+
+ v := viper.GetViper()
+ walletDir := config.ResolveHomePath(viper.GetString(alphabetWalletsFlag))
+ pwds, err := initializeWallets(v, walletDir, int(size))
+ if err != nil {
+ return err
+ }
+
+ _, err = initializeContractWallet(v, walletDir)
+ if err != nil {
+ return err
+ }
+
+ cmd.Println("size:", size)
+ cmd.Println("alphabet-wallets:", walletDir)
+ for i := range pwds {
+ cmd.Printf("wallet[%d]: %s\n", i, pwds[i])
+ }
+
+ return nil
+}
+
+func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, error) {
+ wallets := make([]*wallet.Wallet, size)
+ pubs := make(keys.PublicKeys, size)
+ passwords := make([]string, size)
+
+ for i := range wallets {
+ password, err := config.GetPassword(v, innerring.GlagoliticLetter(i).String())
+ if err != nil {
+ return nil, fmt.Errorf("can't fetch password: %w", err)
+ }
+
+ p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
+ f, err := os.OpenFile(p, os.O_CREATE, 0644)
+ if err != nil {
+ return nil, fmt.Errorf("can't create wallet file: %w", err)
+ }
+ if err := f.Close(); err != nil {
+ return nil, fmt.Errorf("can't close wallet file: %w", err)
+ }
+ w, err := wallet.NewWallet(p)
+ if err != nil {
+ return nil, fmt.Errorf("can't create wallet: %w", err)
+ }
+ if err := w.CreateAccount(singleAccountName, password); err != nil {
+ return nil, fmt.Errorf("can't create account: %w", err)
+ }
+
+ passwords[i] = password
+ wallets[i] = w
+ pubs[i] = w.Accounts[0].PrivateKey().PublicKey()
+ }
+
+ var errG errgroup.Group
+
+ // Create committee account with N/2+1 multi-signature.
+ majCount := smartcontract.GetMajorityHonestNodeCount(size)
+ // Create consensus account with 2*N/3+1 multi-signature.
+ bftCount := smartcontract.GetDefaultHonestNodeCount(size)
+ for i := range wallets {
+ i := i
+ ps := pubs.Copy()
+ errG.Go(func() error {
+ if err := addMultisigAccount(wallets[i], majCount, committeeAccountName, passwords[i], ps); err != nil {
+ return fmt.Errorf("can't create committee account: %w", err)
+ }
+ if err := addMultisigAccount(wallets[i], bftCount, consensusAccountName, passwords[i], ps); err != nil {
+			return fmt.Errorf("can't create consensus account: %w", err)
+ }
+ if err := wallets[i].SavePretty(); err != nil {
+ return fmt.Errorf("can't save wallet: %w", err)
+ }
+ return nil
+ })
+ }
+ if err := errG.Wait(); err != nil {
+ return nil, err
+ }
+ return passwords, nil
+}
+
+func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs keys.PublicKeys) error {
+ acc := wallet.NewAccountFromPrivateKey(w.Accounts[0].PrivateKey())
+ acc.Label = name
+
+ if err := acc.ConvertMultisig(m, pubs); err != nil {
+ return err
+ }
+ if err := acc.Encrypt(password, keys.NEP2ScryptParams()); err != nil {
+ return err
+ }
+ w.AddAccount(acc)
+ return nil
+}
+
+func generateStorageCreds(cmd *cobra.Command, _ []string) error {
+ return refillGas(cmd, storageGasConfigFlag, true)
+}
+
+func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
+ // storage wallet path is not part of the config
+ storageWalletPath, _ := cmd.Flags().GetString(storageWalletFlag)
+ // wallet address is not part of the config
+ walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
+
+ var gasReceiver util.Uint160
+
+ if len(walletAddress) != 0 {
+ gasReceiver, err = address.StringToUint160(walletAddress)
+ if err != nil {
+ return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
+ }
+ } else {
+ if storageWalletPath == "" {
+			return fmt.Errorf("missing wallet path (use '--%s <path>')", storageWalletFlag)
+ }
+
+ var w *wallet.Wallet
+
+ if createWallet {
+ w, err = wallet.NewWallet(storageWalletPath)
+ } else {
+ w, err = wallet.NewWalletFromFile(storageWalletPath)
+ }
+
+ if err != nil {
+ return fmt.Errorf("can't create wallet: %w", err)
+ }
+
+ if createWallet {
+ var password string
+
+ label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
+ password, err := config.GetStoragePassword(viper.GetViper(), label)
+ if err != nil {
+ return fmt.Errorf("can't fetch password: %w", err)
+ }
+
+ if label == "" {
+ label = singleAccountName
+ }
+
+ if err := w.CreateAccount(label, password); err != nil {
+ return fmt.Errorf("can't create account: %w", err)
+ }
+ }
+
+ gasReceiver = w.Accounts[0].Contract.ScriptHash()
+ }
+
+ gasStr := viper.GetString(gasFlag)
+
+ gasAmount, err := parseGASAmount(gasStr)
+ if err != nil {
+ return err
+ }
+
+ wCtx, err := newInitializeContext(cmd, viper.GetViper())
+ if err != nil {
+ return err
+ }
+
+ bw := io.NewBufBinWriter()
+ emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
+ wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
+ emit.Opcodes(bw.BinWriter, opcode.ASSERT)
+ if bw.Err != nil {
+ return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
+ }
+
+ if err := wCtx.sendCommitteeTx(bw.Bytes(), false); err != nil {
+ return err
+ }
+
+ return wCtx.awaitTx()
+}
+
+func parseGASAmount(s string) (fixedn.Fixed8, error) {
+ gasAmount, err := fixedn.Fixed8FromString(s)
+ if err != nil {
+ return 0, fmt.Errorf("invalid GAS amount %s: %w", s, err)
+ }
+ if gasAmount <= 0 {
+ return 0, fmt.Errorf("GAS amount must be positive (got %d)", gasAmount)
+ }
+ return gasAmount, nil
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
deleted file mode 100644
index 78f8617f1..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package generate
-
-import (
- "errors"
- "fmt"
- "os"
- "path/filepath"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
- "golang.org/x/sync/errgroup"
-)
-
-func AlphabetCreds(cmd *cobra.Command, _ []string) error {
- // alphabet size is not part of the config
- size, err := cmd.Flags().GetUint(commonflags.AlphabetSizeFlag)
- if err != nil {
- return err
- }
- if size == 0 {
- return errors.New("size must be > 0")
- }
- if size > constants.MaxAlphabetNodes {
- return helper.ErrTooManyAlphabetNodes
- }
-
- v := viper.GetViper()
- walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
- pwds, err := initializeWallets(v, walletDir, int(size))
- if err != nil {
- return err
- }
-
- _, err = helper.InitializeContractWallet(v, walletDir)
- if err != nil {
- return err
- }
-
- cmd.Println("size:", size)
- cmd.Println("alphabet-wallets:", walletDir)
- for i := range pwds {
- cmd.Printf("wallet[%d]: %s\n", i, pwds[i])
- }
-
- return nil
-}
-
-func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, error) {
- wallets := make([]*wallet.Wallet, size)
- pubs := make(keys.PublicKeys, size)
- passwords := make([]string, size)
-
- var errG errgroup.Group
-
- for i := range wallets {
- password, err := config.GetPassword(v, innerring.GlagoliticLetter(i).String())
- if err != nil {
- return nil, fmt.Errorf("can't fetch password: %w", err)
- }
-
- errG.Go(func() error {
- p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
- f, err := os.OpenFile(p, os.O_CREATE, 0o644)
- if err != nil {
- return fmt.Errorf("can't create wallet file: %w", err)
- }
- if err := f.Close(); err != nil {
- return fmt.Errorf("can't close wallet file: %w", err)
- }
- w, err := wallet.NewWallet(p)
- if err != nil {
- return fmt.Errorf("can't create wallet: %w", err)
- }
- if err := w.CreateAccount(constants.SingleAccountName, password); err != nil {
- return fmt.Errorf("can't create account: %w", err)
- }
-
- passwords[i] = password
- wallets[i] = w
- pubs[i] = w.Accounts[0].PrivateKey().PublicKey()
- return nil
- })
- }
-
- if err := errG.Wait(); err != nil {
- return nil, err
- }
-
- // Create committee account with N/2+1 multi-signature.
- majCount := smartcontract.GetMajorityHonestNodeCount(size)
- // Create consensus account with 2*N/3+1 multi-signature.
- bftCount := smartcontract.GetDefaultHonestNodeCount(size)
- for i := range wallets {
- ps := pubs.Copy()
- errG.Go(func() error {
- if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil {
- return fmt.Errorf("can't create committee account: %w", err)
- }
- if err := addMultisigAccount(wallets[i], bftCount, constants.ConsensusAccountName, passwords[i], ps); err != nil {
- return fmt.Errorf("can't create consentus account: %w", err)
- }
- if err := wallets[i].SavePretty(); err != nil {
- return fmt.Errorf("can't save wallet: %w", err)
- }
- return nil
- })
- }
- if err := errG.Wait(); err != nil {
- return nil, err
- }
- return passwords, nil
-}
-
-func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs keys.PublicKeys) error {
- acc := wallet.NewAccountFromPrivateKey(w.Accounts[0].PrivateKey())
- acc.Label = name
-
- if err := acc.ConvertMultisig(m, pubs); err != nil {
- return err
- }
- if err := acc.Encrypt(password, keys.NEP2ScryptParams()); err != nil {
- return err
- }
- w.AddAccount(acc)
- return nil
-}
-
-func generateStorageCreds(cmd *cobra.Command, _ []string) error {
- walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
- w, err := wallet.NewWallet(walletPath)
- if err != nil {
- return fmt.Errorf("create wallet: %w", err)
- }
-
- label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
- password, err := config.GetStoragePassword(viper.GetViper(), label)
- if err != nil {
- return fmt.Errorf("can't fetch password: %w", err)
- }
-
- if label == "" {
- label = constants.SingleAccountName
- }
-
- if err := w.CreateAccount(label, password); err != nil {
- return fmt.Errorf("can't create account: %w", err)
- }
- return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
-}
-
-func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
- gasStr := viper.GetString(gasFlag)
-
- gasAmount, err := helper.ParseGASAmount(gasStr)
- if err != nil {
- return err
- }
-
- wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
- if err != nil {
- return err
- }
-
- bw := io.NewBufBinWriter()
- for _, gasReceiver := range gasReceivers {
- emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
- wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
- emit.Opcodes(bw.BinWriter, opcode.ASSERT)
- }
- if bw.Err != nil {
- return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
- }
-
- if err := wCtx.SendCommitteeTx(bw.Bytes(), false); err != nil {
- return err
- }
-
- return wCtx.AwaitTx()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/root.go b/cmd/frostfs-adm/internal/modules/morph/generate/root.go
deleted file mode 100644
index 73c986713..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/generate/root.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package generate
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const (
- storageWalletLabelFlag = "label"
- storageGasCLIFlag = "initial-gas"
- storageGasConfigFlag = "storage.initial_gas"
- walletAddressFlag = "wallet-address"
-)
-
-var (
- GenerateStorageCmd = &cobra.Command{
- Use: "generate-storage-wallet",
- Short: "Generate storage node wallet for the morph network",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(storageGasConfigFlag, cmd.Flags().Lookup(storageGasCLIFlag))
- },
- RunE: generateStorageCreds,
- }
- RefillGasCmd = &cobra.Command{
- Use: "refill-gas",
- Short: "Refill GAS of storage node's wallet in the morph network",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
- },
- RunE: func(cmd *cobra.Command, _ []string) error {
- storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag)
- walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
-
- var gasReceivers []util.Uint160
- for _, walletAddress := range walletAddresses {
- addr, err := address.StringToUint160(walletAddress)
- if err != nil {
- return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
- }
-
- gasReceivers = append(gasReceivers, addr)
- }
- for _, storageWalletPath := range storageWalletPaths {
- w, err := wallet.NewWalletFromFile(storageWalletPath)
- if err != nil {
- return fmt.Errorf("can't create wallet: %w", err)
- }
-
- gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
- }
- return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
- },
- }
- GenerateAlphabetCmd = &cobra.Command{
- Use: "generate-alphabet",
- Short: "Generate alphabet wallets for consensus nodes of the morph network",
- PreRun: func(cmd *cobra.Command, _ []string) {
- // PreRun fixes https://github.com/spf13/viper/issues/233
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- },
- RunE: AlphabetCreds,
- }
-)
-
-func initRefillGasCmd() {
- RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet")
- RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet")
- RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
- RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag)
-}
-
-func initGenerateStorageCmd() {
- GenerateStorageCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- GenerateStorageCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- GenerateStorageCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to new storage node wallet")
- GenerateStorageCmd.Flags().String(storageGasCLIFlag, "", "Initial amount of GAS to transfer")
- GenerateStorageCmd.Flags().StringP(storageWalletLabelFlag, "l", "", "Wallet label")
-}
-
-func initGenerateAlphabetCmd() {
- GenerateAlphabetCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- GenerateAlphabetCmd.Flags().Uint(commonflags.AlphabetSizeFlag, 7, "Amount of alphabet wallets to generate")
-}
-
-func init() {
- initRefillGasCmd()
- initGenerateStorageCmd()
- initGenerateAlphabetCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go b/cmd/frostfs-adm/internal/modules/morph/generate_test.go
similarity index 60%
rename from cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go
rename to cmd/frostfs-adm/internal/modules/morph/generate_test.go
index 15af5637b..7f0a2708c 100644
--- a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/generate_test.go
@@ -1,4 +1,4 @@
-package generate
+package morph
import (
"bytes"
@@ -10,8 +10,6 @@ import (
"sync"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -22,53 +20,57 @@ import (
"golang.org/x/term"
)
+const testContractPassword = "grouppass"
+
func TestGenerateAlphabet(t *testing.T) {
+ const size = 4
+
walletDir := t.TempDir()
buf := setupTestTerminal(t)
- cmd := GenerateAlphabetCmd
+ cmd := generateAlphabetCmd
v := viper.GetViper()
t.Run("zero size", func(t *testing.T) {
buf.Reset()
- v.Set(commonflags.AlphabetWalletsFlag, walletDir)
- require.NoError(t, cmd.Flags().Set(commonflags.AlphabetSizeFlag, "0"))
+ v.Set(alphabetWalletsFlag, walletDir)
+ require.NoError(t, cmd.Flags().Set(alphabetSizeFlag, "0"))
buf.WriteString("pass\r")
- require.Error(t, AlphabetCreds(cmd, nil))
+ require.Error(t, generateAlphabetCreds(cmd, nil))
})
t.Run("no password provided", func(t *testing.T) {
buf.Reset()
- v.Set(commonflags.AlphabetWalletsFlag, walletDir)
- require.NoError(t, cmd.Flags().Set(commonflags.AlphabetSizeFlag, "1"))
- require.Error(t, AlphabetCreds(cmd, nil))
+ v.Set(alphabetWalletsFlag, walletDir)
+ require.NoError(t, cmd.Flags().Set(alphabetSizeFlag, "1"))
+ require.Error(t, generateAlphabetCreds(cmd, nil))
})
t.Run("missing directory", func(t *testing.T) {
buf.Reset()
dir := filepath.Join(os.TempDir(), "notexist."+strconv.FormatUint(rand.Uint64(), 10))
- v.Set(commonflags.AlphabetWalletsFlag, dir)
- require.NoError(t, cmd.Flags().Set(commonflags.AlphabetSizeFlag, "1"))
+ v.Set(alphabetWalletsFlag, dir)
+ require.NoError(t, cmd.Flags().Set(alphabetSizeFlag, "1"))
buf.WriteString("pass\r")
- require.Error(t, AlphabetCreds(cmd, nil))
+ require.Error(t, generateAlphabetCreds(cmd, nil))
})
t.Run("no password for contract group wallet", func(t *testing.T) {
buf.Reset()
- v.Set(commonflags.AlphabetWalletsFlag, walletDir)
- require.NoError(t, cmd.Flags().Set(commonflags.AlphabetSizeFlag, "1"))
- buf.WriteString("pass\r")
- require.Error(t, AlphabetCreds(cmd, nil))
+ v.Set(alphabetWalletsFlag, walletDir)
+ require.NoError(t, cmd.Flags().Set(alphabetSizeFlag, strconv.FormatUint(size, 10)))
+ for i := uint64(0); i < size; i++ {
+ buf.WriteString(strconv.FormatUint(i, 10) + "\r")
+ }
+ require.Error(t, generateAlphabetCreds(cmd, nil))
})
- const size = 4
-
buf.Reset()
- v.Set(commonflags.AlphabetWalletsFlag, walletDir)
- require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10)))
- for i := range uint64(size) {
+ v.Set(alphabetWalletsFlag, walletDir)
+ require.NoError(t, generateAlphabetCmd.Flags().Set(alphabetSizeFlag, strconv.FormatUint(size, 10)))
+ for i := uint64(0); i < size; i++ {
buf.WriteString(strconv.FormatUint(i, 10) + "\r")
}
- buf.WriteString(constants.TestContractPassword + "\r")
- require.NoError(t, AlphabetCreds(GenerateAlphabetCmd, nil))
+ buf.WriteString(testContractPassword + "\r")
+ require.NoError(t, generateAlphabetCreds(generateAlphabetCmd, nil))
var wg sync.WaitGroup
for i := uint64(0); i < size; i++ {
@@ -85,12 +87,12 @@ func TestGenerateAlphabet(t *testing.T) {
err := a.Decrypt(strconv.FormatUint(i, 10), keys.NEP2ScryptParams())
require.NoError(t, err, "can't decrypt account")
switch a.Label {
- case constants.ConsensusAccountName:
+ case consensusAccountName:
require.Equal(t, smartcontract.GetDefaultHonestNodeCount(size), len(a.Contract.Parameters))
- case constants.CommitteeAccountName:
+ case committeeAccountName:
require.Equal(t, smartcontract.GetMajorityHonestNodeCount(size), len(a.Contract.Parameters))
default:
- require.Equal(t, constants.SingleAccountName, a.Label)
+ require.Equal(t, singleAccountName, a.Label)
}
}
}()
@@ -98,11 +100,11 @@ func TestGenerateAlphabet(t *testing.T) {
wg.Wait()
t.Run("check contract group wallet", func(t *testing.T) {
- p := filepath.Join(walletDir, constants.ContractWalletFilename)
+ p := filepath.Join(walletDir, contractWalletFilename)
w, err := wallet.NewWalletFromFile(p)
require.NoError(t, err, "contract wallet doesn't exist")
require.Equal(t, 1, len(w.Accounts), "contract wallet must have 1 accout")
- require.NoError(t, w.Accounts[0].Decrypt(constants.TestContractPassword, keys.NEP2ScryptParams()))
+ require.NoError(t, w.Accounts[0].Decrypt(testContractPassword, keys.NEP2ScryptParams()))
})
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/group.go b/cmd/frostfs-adm/internal/modules/morph/group.go
new file mode 100644
index 000000000..3fdffd4e6
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/group.go
@@ -0,0 +1,106 @@
+package morph
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+const (
+ contractWalletFilename = "contract.json"
+ contractWalletPasswordKey = "contract"
+)
+
+func initializeContractWallet(v *viper.Viper, walletDir string) (*wallet.Wallet, error) {
+ password, err := config.GetPassword(v, contractWalletPasswordKey)
+ if err != nil {
+ return nil, err
+ }
+
+ w, err := wallet.NewWallet(filepath.Join(walletDir, contractWalletFilename))
+ if err != nil {
+ return nil, err
+ }
+
+ acc, err := wallet.NewAccount()
+ if err != nil {
+ return nil, err
+ }
+
+ err = acc.Encrypt(password, keys.NEP2ScryptParams())
+ if err != nil {
+ return nil, err
+ }
+
+ w.AddAccount(acc)
+ if err := w.SavePretty(); err != nil {
+ return nil, err
+ }
+
+ return w, nil
+}
+
+func openContractWallet(v *viper.Viper, cmd *cobra.Command, walletDir string) (*wallet.Wallet, error) {
+ p := filepath.Join(walletDir, contractWalletFilename)
+ w, err := wallet.NewWalletFromFile(p)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, fmt.Errorf("can't open wallet: %w", err)
+ }
+
+ cmd.Printf("Contract group wallet is missing, initialize at %s\n", p)
+ return initializeContractWallet(v, walletDir)
+ }
+
+ password, err := config.GetPassword(v, contractWalletPasswordKey)
+ if err != nil {
+ return nil, err
+ }
+
+ for i := range w.Accounts {
+ if err := w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
+ return nil, fmt.Errorf("can't unlock wallet: %w", err)
+ }
+ }
+
+ return w, nil
+}
+
+func (c *initializeContext) addManifestGroup(h util.Uint160, cs *contractState) error {
+ priv := c.ContractWallet.Accounts[0].PrivateKey()
+ pub := priv.PublicKey()
+
+ sig := priv.Sign(h.BytesBE())
+ found := false
+
+ for i := range cs.Manifest.Groups {
+ if cs.Manifest.Groups[i].PublicKey.Equal(pub) {
+ cs.Manifest.Groups[i].Signature = sig
+ found = true
+ break
+ }
+ }
+ if !found {
+ cs.Manifest.Groups = append(cs.Manifest.Groups, manifest.Group{
+ PublicKey: pub,
+ Signature: sig,
+ })
+ }
+
+ data, err := json.Marshal(cs.Manifest)
+ if err != nil {
+ return err
+ }
+
+ cs.RawManifest = data
+ return nil
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go
deleted file mode 100644
index 6499ace5f..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package helper
-
-import (
- "fmt"
-
- "github.com/google/uuid"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/core/transaction"
- "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/context"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/viper"
-)
-
-// LocalActor is a kludge, do not use it outside of the morph commands.
-type LocalActor struct {
- neoActor *actor.Actor
- accounts []*wallet.Account
- Invoker *invoker.Invoker
- rpcInvoker invoker.RPCInvoke
-}
-
-type AlphabetWallets struct {
- Label string
- Path string
-}
-
-func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) {
- w, err := GetAlphabetWallets(v, a.Path)
- if err != nil {
- return nil, err
- }
-
- var accounts []*wallet.Account
- for _, wall := range w {
- acc, err := GetWalletAccount(wall, a.Label)
- if err != nil {
- return nil, err
- }
- accounts = append(accounts, acc)
- }
- return accounts, nil
-}
-
-type RegularWallets struct{ Path string }
-
-func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) {
- w, err := getRegularWallet(r.Path)
- if err != nil {
- return nil, err
- }
-
- return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil
-}
-
-// NewLocalActor create LocalActor with accounts form provided wallets.
-// In case of empty wallets provided created actor with dummy account only for read operation.
-//
-// If wallets are provided, the contract client will use accounts with accName name from these wallets.
-// To determine which account name should be used in a contract client, refer to how the contract
-// verifies the transaction signature.
-func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) {
- var act *actor.Actor
- var accounts []*wallet.Account
- var signers []actor.SignerAccount
-
- if alphabet != nil {
- account, err := alphabet.GetAccount(viper.GetViper())
- if err != nil {
- return nil, err
- }
-
- accounts = append(accounts, account...)
- signers = append(signers, actor.SignerAccount{
- Signer: transaction.Signer{
- Account: account[0].Contract.ScriptHash(),
- Scopes: transaction.Global,
- },
- Account: account[0],
- })
- }
-
- for _, w := range regularWallets {
- if w == nil {
- continue
- }
- account, err := w.GetAccount()
- if err != nil {
- return nil, err
- }
-
- accounts = append(accounts, account...)
- signers = append(signers, actor.SignerAccount{
- Signer: transaction.Signer{
- Account: account[0].Contract.ScriptHash(),
- Scopes: transaction.Global,
- },
- Account: account[0],
- })
- }
-
- act, err := actor.New(c, signers)
- if err != nil {
- return nil, err
- }
- return &LocalActor{
- neoActor: act,
- accounts: accounts,
- Invoker: &act.Invoker,
- rpcInvoker: c,
- }, nil
-}
-
-func (a *LocalActor) SendCall(contract util.Uint160, method string, params ...any) (util.Uint256, uint32, error) {
- tx, err := a.neoActor.MakeCall(contract, method, params...)
- if err != nil {
- return util.Uint256{}, 0, err
- }
- err = a.resign(tx)
- if err != nil {
- return util.Uint256{}, 0, err
- }
- return a.neoActor.Send(tx)
-}
-
-func (a *LocalActor) SendRun(script []byte) (util.Uint256, uint32, error) {
- tx, err := a.neoActor.MakeRun(script)
- if err != nil {
- return util.Uint256{}, 0, err
- }
- err = a.resign(tx)
- if err != nil {
- return util.Uint256{}, 0, err
- }
- return a.neoActor.Send(tx)
-}
-
-// resign is used to sign tx with committee accounts.
-// Inside the methods `MakeCall` and `SendRun` of the NeoGO's actor transaction is signing by committee account,
-// because actor uses committee wallet.
-// But it is not enough, need to sign with another committee accounts.
-func (a *LocalActor) resign(tx *transaction.Transaction) error {
- if len(a.accounts[0].Contract.Parameters) > 1 {
- // Use parameter context to avoid dealing with signature order.
- network := a.neoActor.GetNetwork()
- pc := context.NewParameterContext("", network, tx)
- h := a.accounts[0].Contract.ScriptHash()
- for _, acc := range a.accounts {
- priv := acc.PrivateKey()
- sign := priv.SignHashable(uint32(network), tx)
- if err := pc.AddSignature(h, acc.Contract, priv.PublicKey(), sign); err != nil {
- return fmt.Errorf("can't add signature: %w", err)
- }
- if len(pc.Items[h].Signatures) == len(acc.Contract.Parameters) {
- break
- }
- }
-
- w, err := pc.GetWitness(h)
- if err != nil {
- return fmt.Errorf("incomplete signature: %w", err)
- }
- tx.Scripts[0] = *w
- }
- return nil
-}
-
-func (a *LocalActor) Wait(h util.Uint256, vub uint32, err error) (*state.AppExecResult, error) {
- return a.neoActor.Wait(h, vub, err)
-}
-
-func (a *LocalActor) Sender() util.Uint160 {
- return a.neoActor.Sender()
-}
-
-func (a *LocalActor) Call(contract util.Uint160, operation string, params ...any) (*result.Invoke, error) {
- return a.neoActor.Call(contract, operation, params...)
-}
-
-func (a *LocalActor) CallAndExpandIterator(_ util.Uint160, _ string, _ int, _ ...any) (*result.Invoke, error) {
- panic("unimplemented")
-}
-
-func (a *LocalActor) TerminateSession(_ uuid.UUID) error {
- panic("unimplemented")
-}
-
-func (a *LocalActor) TraverseIterator(sessionID uuid.UUID, iterator *result.Iterator, num int) ([]stackitem.Item, error) {
- return a.neoActor.TraverseIterator(sessionID, iterator, num)
-}
-
-func (a *LocalActor) MakeRun(_ []byte) (*transaction.Transaction, error) {
- panic("unimplemented")
-}
-
-func (a *LocalActor) MakeUnsignedCall(_ util.Uint160, _ string, _ []transaction.Attribute, _ ...any) (*transaction.Transaction, error) {
- panic("unimplemented")
-}
-
-func (a *LocalActor) MakeUnsignedRun(_ []byte, _ []transaction.Attribute) (*transaction.Transaction, error) {
- panic("unimplemented")
-}
-
-func (a *LocalActor) MakeCall(_ util.Uint160, _ string, _ ...any) (*transaction.Transaction, error) {
- panic("unimplemented")
-}
-
-func (a *LocalActor) GetRPCInvoker() invoker.RPCInvoke {
- return a.rpcInvoker
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
deleted file mode 100644
index 64d1c6393..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package helper
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/spf13/viper"
-)
-
-func getFrostfsIDAdminFromContract(roInvoker *invoker.Invoker) (util.Uint160, bool, error) {
- r := management.NewReader(roInvoker)
- cs, err := GetContractByID(r, 1)
- if err != nil {
- return util.Uint160{}, false, fmt.Errorf("get nns contract: %w", err)
- }
- fidHash, err := NNSResolveHash(roInvoker, cs.Hash, DomainOf(constants.FrostfsIDContract))
- if err != nil {
- return util.Uint160{}, false, fmt.Errorf("resolve frostfsid contract hash: %w", err)
- }
- item, err := unwrap.Item(roInvoker.Call(fidHash, "getAdmin"))
- if err != nil {
- return util.Uint160{}, false, fmt.Errorf("getAdmin: %w", err)
- }
- if _, ok := item.(stackitem.Null); ok {
- return util.Uint160{}, false, nil
- }
-
- bs, err := item.TryBytes()
- if err != nil {
- return util.Uint160{}, true, fmt.Errorf("getAdmin: decode result: %w", err)
- }
- h, err := util.Uint160DecodeBytesBE(bs)
- if err != nil {
- return util.Uint160{}, true, fmt.Errorf("getAdmin: decode result: %w", err)
- }
- return h, true, nil
-}
-
-func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any, method string) ([]any, error) {
- items := make([]any, 0, 6)
-
- switch ctrName {
- case constants.FrostfsContract:
- items = append(items,
- c.Contracts[constants.ProcessingContract].Hash,
- keysParam,
- smartcontract.Parameter{})
- case constants.ProcessingContract:
- items = append(items, c.Contracts[constants.FrostfsContract].Hash)
- return items[1:], nil // no notary info
- case constants.BalanceContract:
- items = append(items,
- c.Contracts[constants.NetmapContract].Hash,
- c.Contracts[constants.ContainerContract].Hash)
- case constants.ContainerContract:
- // In case if NNS is updated multiple times, we can't calculate
- // it's actual hash based on local data, thus query chain.
- r := management.NewReader(c.ReadOnlyInvoker)
- nnsCs, err := GetContractByID(r, 1)
- if err != nil {
- return nil, fmt.Errorf("get nns contract: %w", err)
- }
- items = append(items,
- c.Contracts[constants.NetmapContract].Hash,
- c.Contracts[constants.BalanceContract].Hash,
- c.Contracts[constants.FrostfsIDContract].Hash,
- nnsCs.Hash,
- "container")
- case constants.FrostfsIDContract:
- var (
- h util.Uint160
- found bool
- err error
- )
- if method == constants.UpdateMethodName {
- h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker)
- }
- if method != constants.UpdateMethodName || err == nil && !found {
- h, found, err = getFrostfsIDAdmin(viper.GetViper())
- }
- if err != nil {
- return nil, err
- }
-
- if found {
- items = append(items, h)
- } else {
- items = append(items, c.Contracts[constants.ProxyContract].Hash)
- }
- case constants.NetmapContract:
- md := GetDefaultNetmapContractConfigMap()
- if method == constants.UpdateMethodName {
- if err := MergeNetmapConfig(c.ReadOnlyInvoker, md); err != nil {
- return nil, err
- }
- }
-
- var configParam []any
- for k, v := range md {
- configParam = append(configParam, k, v)
- }
-
- items = append(items,
- c.Contracts[constants.BalanceContract].Hash,
- c.Contracts[constants.ContainerContract].Hash,
- keysParam,
- configParam)
- case constants.ProxyContract:
- items = nil
- case constants.PolicyContract:
- items = append(items, c.Contracts[constants.ProxyContract].Hash)
- default:
- panic("invalid contract name: " + ctrName)
- }
- return items, nil
-}
-
-func GetContractDeployParameters(cs *ContractState, deployData []any) []any {
- return []any{cs.RawNEF, cs.RawManifest, deployData}
-}
-
-func DeployNNS(c *InitializeContext, method string) error {
- cs := c.GetContract(constants.NNSContract)
- h := cs.Hash
-
- nnsCs, err := c.NNSContractState()
- if err != nil {
- return err
- }
- if nnsCs != nil {
- if nnsCs.NEF.Checksum == cs.NEF.Checksum {
- if method == constants.DeployMethodName {
- c.Command.Println("NNS contract is already deployed.")
- } else {
- c.Command.Println("NNS contract is already updated.")
- }
- return nil
- }
- h = nnsCs.Hash
- }
-
- err = AddManifestGroup(c.ContractWallet, h, cs)
- if err != nil {
- return fmt.Errorf("can't sign manifest group: %v", err)
- }
-
- params := GetContractDeployParameters(cs, nil)
-
- invokeHash := management.Hash
- if method == constants.UpdateMethodName {
- invokeHash = nnsCs.Hash
- }
-
- tx, err := c.CommitteeAct.MakeCall(invokeHash, method, params...)
- if err != nil {
- return fmt.Errorf("failed to create deploy tx for %s: %w", constants.NNSContract, err)
- }
-
- if err := c.MultiSignAndSend(tx, constants.CommitteeAccountName); err != nil {
- return fmt.Errorf("can't send deploy transaction: %w", err)
- }
-
- c.Command.Println("NNS hash:", invokeHash.StringLE())
- return c.AwaitTx()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/download.go b/cmd/frostfs-adm/internal/modules/morph/helper/download.go
deleted file mode 100644
index 71528a5db..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/download.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package helper
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "strings"
- "time"
-
- "code.gitea.io/sdk/gitea"
- "github.com/spf13/cobra"
-)
-
-var errNoReleasesFound = errors.New("attempt to fetch contracts archive from the offitial repository failed: no releases found")
-
-func downloadContracts(cmd *cobra.Command, url string) (io.ReadCloser, error) {
- cmd.Printf("Downloading contracts archive from '%s'\n", url)
-
- // HTTP client with connect timeout
- client := http.Client{
- Transport: &http.Transport{
- DialContext: (&net.Dialer{
- Timeout: 10 * time.Second,
- }).DialContext,
- },
- }
-
- ctx, cancel := context.WithTimeout(cmd.Context(), 60*time.Second)
- defer cancel()
-
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
- if err != nil {
- return nil, fmt.Errorf("can't create request: %w", err)
- }
-
- resp, err := client.Do(req)
- if err != nil {
- return nil, fmt.Errorf("can't fetch contracts archive: %w", err)
- }
- return resp.Body, nil
-}
-
-func downloadContractsFromRepository(cmd *cobra.Command) (io.ReadCloser, error) {
- client, err := gitea.NewClient("https://git.frostfs.info")
- if err != nil {
- return nil, fmt.Errorf("can't initialize repository client: %w", err)
- }
-
- releases, _, err := client.ListReleases("TrueCloudLab", "frostfs-contract", gitea.ListReleasesOptions{})
- if err != nil {
- return nil, fmt.Errorf("can't fetch release information: %w", err)
- }
-
- var latestRelease *gitea.Release
- for _, r := range releases {
- if !r.IsDraft && !r.IsPrerelease {
- latestRelease = r
- break
- }
- }
-
- if latestRelease == nil {
- return nil, errNoReleasesFound
- }
-
- cmd.Printf("Found release %s (%s)\n", latestRelease.TagName, latestRelease.Title)
-
- var url string
- for _, a := range latestRelease.Attachments {
- if strings.HasPrefix(a.Name, "frostfs-contract") {
- url = a.DownloadURL
- break
- }
- }
- if url == "" {
- return nil, errors.New("can't find contracts archive in the latest release")
- }
-
- return downloadContracts(cmd, url)
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go
deleted file mode 100644
index fce2dfb74..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package helper
-
-import (
- "fmt"
-
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/spf13/viper"
-)
-
-const frostfsIDAdminConfigKey = "frostfsid.admin"
-
-func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
- admin := v.GetString(frostfsIDAdminConfigKey)
- if admin == "" {
- return util.Uint160{}, false, nil
- }
-
- h, err := address.StringToUint160(admin)
- if err == nil {
- return h, true, nil
- }
-
- h, err = util.Uint160DecodeStringLE(admin)
- if err == nil {
- return h, true, nil
- }
-
- pk, err := keys.NewPublicKeyFromString(admin)
- if err == nil {
- return pk.GetScriptHash(), true, nil
- }
- return util.Uint160{}, true, fmt.Errorf("frostfsid: admin is invalid: '%s'", admin)
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go
deleted file mode 100644
index 38991e962..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package helper
-
-import (
- "encoding/hex"
- "testing"
-
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/spf13/viper"
- "github.com/stretchr/testify/require"
-)
-
-func TestFrostfsIDConfig(t *testing.T) {
- pks := make([]*keys.PrivateKey, 4)
- for i := range pks {
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- pks[i] = pk
- }
-
- fmts := []string{
- pks[0].GetScriptHash().StringLE(),
- address.Uint160ToString(pks[1].GetScriptHash()),
- hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
- hex.EncodeToString(pks[3].PublicKey().Bytes()),
- }
-
- for i := range fmts {
- v := viper.New()
- v.Set("frostfsid.admin", fmts[i])
-
- actual, found, err := getFrostfsIDAdmin(v)
- require.NoError(t, err)
- require.True(t, found)
- require.Equal(t, pks[i].GetScriptHash(), actual)
- }
-
- t.Run("bad key", func(t *testing.T) {
- v := viper.New()
- v.Set("frostfsid.admin", "abc")
-
- _, found, err := getFrostfsIDAdmin(v)
- require.Error(t, err)
- require.True(t, found)
- })
- t.Run("missing key", func(t *testing.T) {
- v := viper.New()
-
- _, found, err := getFrostfsIDAdmin(v)
- require.NoError(t, err)
- require.False(t, found)
- })
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/group.go b/cmd/frostfs-adm/internal/modules/morph/helper/group.go
deleted file mode 100644
index 10a164651..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/group.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package helper
-
-import (
- "encoding/json"
-
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
-)
-
-func AddManifestGroup(cw *wallet.Wallet, h util.Uint160, cs *ContractState) error {
- priv := cw.Accounts[0].PrivateKey()
- pub := priv.PublicKey()
-
- sig := priv.Sign(h.BytesBE())
- found := false
-
- for i := range cs.Manifest.Groups {
- if cs.Manifest.Groups[i].PublicKey.Equal(pub) {
- cs.Manifest.Groups[i].Signature = sig
- found = true
- break
- }
- }
- if !found {
- cs.Manifest.Groups = append(cs.Manifest.Groups, manifest.Group{
- PublicKey: pub,
- Signature: sig,
- })
- }
-
- data, err := json.Marshal(cs.Manifest)
- if err != nil {
- return err
- }
-
- cs.RawManifest = data
- return nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go
deleted file mode 100644
index 50b5c1ec7..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package helper
-
-import (
- "errors"
- "fmt"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var ErrTooManyAlphabetNodes = fmt.Errorf("too many alphabet nodes (maximum allowed is %d)", constants.MaxAlphabetNodes)
-
-func AwaitTx(cmd *cobra.Command, c Client, txs []HashVUBPair) error {
- cmd.Println("Waiting for transactions to persist...")
-
- at := trigger.Application
-
- var retErr error
-
-loop:
- for i := range txs {
- var it int
- var pollInterval time.Duration
- var pollIntervalChanged bool
- for {
- // We must fetch current height before application log, to avoid race condition.
- currBlock, err := c.GetBlockCount()
- if err != nil {
- return fmt.Errorf("can't fetch current block height: %w", err)
- }
- res, err := c.GetApplicationLog(txs[i].Hash, &at)
- if err == nil {
- if retErr == nil && len(res.Executions) > 0 && res.Executions[0].VMState != vmstate.Halt {
- retErr = fmt.Errorf("tx %d persisted in %s state: %s",
- i, res.Executions[0].VMState, res.Executions[0].FaultException)
- }
- continue loop
- }
- if txs[i].Vub < currBlock {
- return fmt.Errorf("tx was not persisted: Vub=%d, height=%d", txs[i].Vub, currBlock)
- }
-
- pollInterval, pollIntervalChanged = NextPollInterval(it, pollInterval)
- if pollIntervalChanged && viper.GetBool(commonflags.Verbose) {
- cmd.Printf("Pool interval to check transaction persistence changed: %s\n", pollInterval.String())
- }
-
- timer := time.NewTimer(pollInterval)
- select {
- case <-cmd.Context().Done():
- return cmd.Context().Err()
- case <-timer.C:
- }
-
- it++
- }
- }
-
- return retErr
-}
-
-func NextPollInterval(it int, previous time.Duration) (time.Duration, bool) {
- const minPollInterval = 1 * time.Second
- const maxPollInterval = 16 * time.Second
- const changeAfter = 5
- if it == 0 {
- return minPollInterval, true
- }
- if it%changeAfter != 0 {
- return previous, false
- }
- nextInterval := previous * 2
- if nextInterval > maxPollInterval {
- return maxPollInterval, previous != maxPollInterval
- }
- return nextInterval, true
-}
-
-func GetWalletAccount(w *wallet.Wallet, typ string) (*wallet.Account, error) {
- for i := range w.Accounts {
- if w.Accounts[i].Label == typ {
- return w.Accounts[i], nil
- }
- }
- return nil, fmt.Errorf("account for '%s' not found", typ)
-}
-
-func GetComitteAcc(cmd *cobra.Command, v *viper.Viper) *wallet.Account {
- walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
- wallets, err := GetAlphabetWallets(v, walletDir)
- commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
-
- committeeAcc, err := GetWalletAccount(wallets[0], constants.CommitteeAccountName)
- commonCmd.ExitOnErr(cmd, "can't find committee account: %w", err)
- return committeeAcc
-}
-
-func NNSResolve(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
- return unwrap.Item(inv.Call(nnsHash, "resolve", domain, int64(nns.TXT)))
-}
-
-// ParseNNSResolveResult parses the result of resolving NNS record.
-// It works with multiple formats (corresponding to multiple NNS versions).
-// If array of hashes is provided, it returns only the first one.
-func ParseNNSResolveResult(res stackitem.Item) (util.Uint160, error) {
- arr, ok := res.Value().([]stackitem.Item)
- if !ok {
- arr = []stackitem.Item{res}
- }
- if _, ok := res.Value().(stackitem.Null); ok || len(arr) == 0 {
- return util.Uint160{}, errors.New("NNS record is missing")
- }
- for i := range arr {
- bs, err := arr[i].TryBytes()
- if err != nil {
- continue
- }
-
- // We support several formats for hash encoding, this logic should be maintained in sync
- // with NNSResolve from pkg/morph/client/nns.go
- h, err := util.Uint160DecodeStringLE(string(bs))
- if err == nil {
- return h, nil
- }
-
- h, err = address.StringToUint160(string(bs))
- if err == nil {
- return h, nil
- }
- }
- return util.Uint160{}, errors.New("no valid hashes are found")
-}
-
-// NNSResolveHash Returns errMissingNNSRecord if invocation fault exception contains "token not found".
-func NNSResolveHash(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (util.Uint160, error) {
- item, err := NNSResolve(inv, nnsHash, domain)
- if err != nil {
- return util.Uint160{}, err
- }
- return ParseNNSResolveResult(item)
-}
-
-func DomainOf(contract string) string {
- return contract + ".frostfs"
-}
-
-func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*keys.PublicKey, error) {
- res, err := NNSResolve(inv, nnsHash, domain)
- if err != nil {
- return nil, err
- }
- if _, ok := res.Value().(stackitem.Null); ok {
- return nil, errors.New("NNS record is missing")
- }
- arr, ok := res.Value().([]stackitem.Item)
- if !ok {
- return nil, errors.New("API of the NNS contract method `resolve` has changed")
- }
- for i := range arr {
- var bs []byte
- bs, err = arr[i].TryBytes()
- if err != nil {
- continue
- }
-
- return keys.NewPublicKeyFromString(string(bs))
- }
- return nil, errors.New("no valid keys are found")
-}
-
-func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
- inv := invoker.New(c, nil)
- reader := nns2.NewReader(inv, nnsHash)
- return reader.IsAvailable(name)
-}
-
-func CheckNotaryEnabled(c Client) error {
- ns, err := c.GetNativeContracts()
- if err != nil {
- return fmt.Errorf("can't get native contract hashes: %w", err)
- }
-
- notaryEnabled := false
- nativeHashes := make(map[string]util.Uint160, len(ns))
- for i := range ns {
- if ns[i].Manifest.Name == nativenames.Notary {
- notaryEnabled = true
- }
- nativeHashes[ns[i].Manifest.Name] = ns[i].Hash
- }
- if !notaryEnabled {
- return errors.New("notary contract must be enabled")
- }
- return nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go
deleted file mode 100644
index da5ffedae..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go
+++ /dev/null
@@ -1,544 +0,0 @@
-package helper
-
-import (
- "encoding/hex"
- "encoding/json"
- "errors"
- "fmt"
- io2 "io"
- "os"
- "path/filepath"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/core/transaction"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/context"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/nef"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var (
- errNegativeDuration = errors.New("epoch duration must be positive")
- errNegativeSize = errors.New("max object size must be positive")
-)
-
-type ContractState struct {
- NEF *nef.File
- RawNEF []byte
- Manifest *manifest.Manifest
- RawManifest []byte
- Hash util.Uint160
-}
-
-type Cache struct {
- NNSCs *state.Contract
- GroupKey *keys.PublicKey
-}
-
-type InitializeContext struct {
- ClientContext
- Cache
- // CommitteeAcc is used for retrieving the committee address and the verification script.
- CommitteeAcc *wallet.Account
- // ConsensusAcc is used for retrieving the committee address and the verification script.
- ConsensusAcc *wallet.Account
- Wallets []*wallet.Wallet
- // ContractWallet is a wallet for providing the contract group signature.
- ContractWallet *wallet.Wallet
- // Accounts contains simple signature accounts in the same order as in Wallets.
- Accounts []*wallet.Account
- Contracts map[string]*ContractState
- Command *cobra.Command
- ContractPath string
- ContractURL string
-}
-
-func (cs *ContractState) Parse() error {
- nf, err := nef.FileFromBytes(cs.RawNEF)
- if err != nil {
- return fmt.Errorf("can't parse NEF file: %w", err)
- }
-
- m := new(manifest.Manifest)
- if err := json.Unmarshal(cs.RawManifest, m); err != nil {
- return fmt.Errorf("can't parse manifest file: %w", err)
- }
-
- cs.NEF = &nf
- cs.Manifest = m
- return nil
-}
-
-func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContext, error) {
- walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
- wallets, err := GetAlphabetWallets(v, walletDir)
- if err != nil {
- return nil, err
- }
-
- needContracts := cmd.Name() == "update-contracts" || cmd.Name() == "init"
-
- var w *wallet.Wallet
- w, err = getWallet(cmd, v, needContracts, walletDir)
- if err != nil {
- return nil, err
- }
-
- c, err := createClient(cmd, v, wallets)
- if err != nil {
- return nil, err
- }
-
- committeeAcc, err := GetWalletAccount(wallets[0], constants.CommitteeAccountName)
- if err != nil {
- return nil, fmt.Errorf("can't find committee account: %w", err)
- }
-
- consensusAcc, err := GetWalletAccount(wallets[0], constants.ConsensusAccountName)
- if err != nil {
- return nil, fmt.Errorf("can't find consensus account: %w", err)
- }
-
- if err := validateInit(cmd); err != nil {
- return nil, err
- }
-
- ctrPath, err := getContractsPath(cmd, needContracts)
- if err != nil {
- return nil, err
- }
-
- var ctrURL string
- if needContracts {
- ctrURL, _ = cmd.Flags().GetString(commonflags.ContractsURLFlag)
- }
-
- if err := CheckNotaryEnabled(c); err != nil {
- return nil, err
- }
-
- accounts, err := getSingleAccounts(wallets)
- if err != nil {
- return nil, err
- }
-
- cliCtx, err := defaultClientContext(c, committeeAcc)
- if err != nil {
- return nil, fmt.Errorf("client context: %w", err)
- }
-
- initCtx := &InitializeContext{
- ClientContext: *cliCtx,
- ConsensusAcc: consensusAcc,
- CommitteeAcc: committeeAcc,
- ContractWallet: w,
- Wallets: wallets,
- Accounts: accounts,
- Command: cmd,
- Contracts: make(map[string]*ContractState),
- ContractPath: ctrPath,
- ContractURL: ctrURL,
- }
-
- if needContracts {
- err := readContracts(initCtx, constants.FullContractList)
- if err != nil {
- return nil, err
- }
- }
-
- return initCtx, nil
-}
-
-func validateInit(cmd *cobra.Command) error {
- if cmd.Name() != "init" {
- return nil
- }
- if viper.GetInt64(commonflags.EpochDurationInitFlag) <= 0 {
- return errNegativeDuration
- }
-
- if viper.GetInt64(commonflags.MaxObjectSizeInitFlag) <= 0 {
- return errNegativeSize
- }
-
- return nil
-}
-
-func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet) (Client, error) {
- var c Client
- var err error
- if ldf := cmd.Flags().Lookup(commonflags.LocalDumpFlag); ldf != nil && ldf.Changed {
- if cmd.Flags().Changed(commonflags.EndpointFlag) {
- return nil, fmt.Errorf("`%s` and `%s` flags are mutually exclusive", commonflags.EndpointFlag, commonflags.LocalDumpFlag)
- }
- c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String())
- } else {
- c, err = NewRemoteClient(v)
- }
- if err != nil {
- return nil, fmt.Errorf("can't create N3 client: %w", err)
- }
- return c, nil
-}
-
-func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) {
- if !needContracts {
- return "", nil
- }
-
- ctrPath, err := cmd.Flags().GetString(commonflags.ContractsInitFlag)
- if err != nil {
- return "", fmt.Errorf("invalid contracts path: %w", err)
- }
- return ctrPath, nil
-}
-
-func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
- accounts := make([]*wallet.Account, len(wallets))
- for i, w := range wallets {
- acc, err := GetWalletAccount(w, constants.SingleAccountName)
- if err != nil {
- return nil, fmt.Errorf("wallet %s is invalid (no single account): %w", w.Path(), err)
- }
- accounts[i] = acc
- }
- return accounts, nil
-}
-
-func readContracts(c *InitializeContext, names []string) error {
- var (
- fi os.FileInfo
- err error
- )
- if c.ContractPath != "" {
- fi, err = os.Stat(c.ContractPath)
- if err != nil {
- return fmt.Errorf("invalid contracts path: %w", err)
- }
- }
-
- if c.ContractPath != "" && fi.IsDir() {
- for _, ctrName := range names {
- cs, err := ReadContract(filepath.Join(c.ContractPath, ctrName), ctrName)
- if err != nil {
- return err
- }
- c.Contracts[ctrName] = cs
- }
- } else {
- var r io2.ReadCloser
- if c.ContractPath != "" {
- r, err = os.Open(c.ContractPath)
- } else if c.ContractURL != "" {
- r, err = downloadContracts(c.Command, c.ContractURL)
- } else {
- r, err = downloadContractsFromRepository(c.Command)
- }
- if err != nil {
- return fmt.Errorf("can't open contracts archive: %w", err)
- }
- defer r.Close()
-
- m, err := readContractsFromArchive(r, names)
- if err != nil {
- return err
- }
- for _, name := range names {
- if err := m[name].Parse(); err != nil {
- return err
- }
- c.Contracts[name] = m[name]
- }
- }
-
- for _, ctrName := range names {
- if ctrName != constants.AlphabetContract {
- cs := c.Contracts[ctrName]
- cs.Hash = state.CreateContractHash(c.CommitteeAcc.Contract.ScriptHash(),
- cs.NEF.Checksum, cs.Manifest.Name)
- }
- }
- return nil
-}
-
-func (c *InitializeContext) Close() {
- if local, ok := c.Client.(*LocalClient); ok {
- err := local.Dump()
- if err != nil {
- c.Command.PrintErrf("Can't write dump: %v\n", err)
- os.Exit(1)
- }
- }
-}
-
-func (c *InitializeContext) AwaitTx() error {
- return c.ClientContext.AwaitTx(c.Command)
-}
-
-func (c *InitializeContext) NNSContractState() (*state.Contract, error) {
- if c.NNSCs != nil {
- return c.NNSCs, nil
- }
-
- r := management.NewReader(c.ReadOnlyInvoker)
- cs, err := r.GetContractByID(1)
- if err != nil {
- return nil, err
- }
-
- c.NNSCs = cs
- return cs, nil
-}
-
-func (c *InitializeContext) GetSigner(tryGroup bool, acc *wallet.Account) transaction.Signer {
- if tryGroup && c.GroupKey != nil {
- return transaction.Signer{
- Account: acc.Contract.ScriptHash(),
- Scopes: transaction.CustomGroups,
- AllowedGroups: keys.PublicKeys{c.GroupKey},
- }
- }
-
- signer := transaction.Signer{
- Account: acc.Contract.ScriptHash(),
- Scopes: transaction.Global, // Scope is important, as we have nested call to container contract.
- }
-
- if !tryGroup {
- return signer
- }
-
- nnsCs, err := c.NNSContractState()
- if err != nil {
- return signer
- }
-
- groupKey, err := NNSResolveKey(c.ReadOnlyInvoker, nnsCs.Hash, client.NNSGroupKeyName)
- if err == nil {
- c.GroupKey = groupKey
-
- signer.Scopes = transaction.CustomGroups
- signer.AllowedGroups = keys.PublicKeys{groupKey}
- }
- return signer
-}
-
-// SendCommitteeTx creates transaction from script, signs it by committee nodes and sends it to RPC.
-// If tryGroup is false, global scope is used for the signer (useful when
-// working with native contracts).
-func (c *InitializeContext) SendCommitteeTx(script []byte, tryGroup bool) error {
- return c.sendMultiTx(script, tryGroup, false)
-}
-
-// SendConsensusTx creates transaction from script, signs it by alphabet nodes and sends it to RPC.
-// Not that because this is used only after the contracts were initialized and deployed,
-// we always try to have a group scope.
-func (c *InitializeContext) SendConsensusTx(script []byte) error {
- return c.sendMultiTx(script, true, true)
-}
-
-func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsensus bool) error {
- var act *actor.Actor
- var err error
-
- withConsensus = withConsensus && !c.ConsensusAcc.Contract.ScriptHash().Equals(c.CommitteeAcc.ScriptHash())
- if tryGroup {
- // Even for consensus signatures we need the committee to pay.
- signers := make([]actor.SignerAccount, 1, 2)
- signers[0] = actor.SignerAccount{
- Signer: c.GetSigner(tryGroup, c.CommitteeAcc),
- Account: c.CommitteeAcc,
- }
- if withConsensus {
- signers = append(signers, actor.SignerAccount{
- Signer: c.GetSigner(tryGroup, c.ConsensusAcc),
- Account: c.ConsensusAcc,
- })
- }
- act, err = actor.New(c.Client, signers)
- } else {
- assert.False(withConsensus, "BUG: should never happen")
- act, err = c.CommitteeAct, nil
- }
- if err != nil {
- return fmt.Errorf("could not create actor: %w", err)
- }
-
- tx, err := act.MakeUnsignedRun(script, []transaction.Attribute{{Type: transaction.HighPriority}})
- if err != nil {
- return fmt.Errorf("could not perform test invocation: %w", err)
- }
-
- if err := c.MultiSign(tx, constants.CommitteeAccountName); err != nil {
- return err
- }
- if withConsensus {
- if err := c.MultiSign(tx, constants.ConsensusAccountName); err != nil {
- return err
- }
- }
-
- return c.SendTx(tx, c.Command, false)
-}
-
-func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accType string) error {
- if err := c.MultiSign(tx, accType); err != nil {
- return err
- }
-
- return c.SendTx(tx, c.Command, false)
-}
-
-func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error {
- version, err := c.Client.GetVersion()
- // error appears only if client
- // has not been initialized
- assert.NoError(err)
- network := version.Protocol.Network
-
- // Use parameter context to avoid dealing with signature order.
- pc := context.NewParameterContext("", network, tx)
- h := c.CommitteeAcc.Contract.ScriptHash()
- if accType == constants.ConsensusAccountName {
- h = c.ConsensusAcc.Contract.ScriptHash()
- }
- for _, w := range c.Wallets {
- acc, err := GetWalletAccount(w, accType)
- if err != nil {
- return fmt.Errorf("can't find %s wallet account: %w", accType, err)
- }
-
- priv := acc.PrivateKey()
- sign := priv.SignHashable(uint32(network), tx)
- if err := pc.AddSignature(h, acc.Contract, priv.PublicKey(), sign); err != nil {
- return fmt.Errorf("can't add signature: %w", err)
- }
- if len(pc.Items[h].Signatures) == len(acc.Contract.Parameters) {
- break
- }
- }
-
- w, err := pc.GetWitness(h)
- if err != nil {
- return fmt.Errorf("incomplete signature: %w", err)
- }
-
- for i := range tx.Signers {
- if tx.Signers[i].Account == h {
- assert.True(i <= len(tx.Scripts), "BUG: invalid signing order")
- if i < len(tx.Scripts) {
- tx.Scripts[i] = *w
- }
- if i == len(tx.Scripts) {
- tx.Scripts = append(tx.Scripts, *w)
- }
- return nil
- }
- }
-
- return fmt.Errorf("%s account was not found among transaction signers", accType)
-}
-
-// EmitUpdateNNSGroupScript emits script for updating group key stored in NNS.
-// First return value is true iff the key is already there and nothing should be done.
-// Second return value is true iff a domain registration code was emitted.
-func (c *InitializeContext) EmitUpdateNNSGroupScript(bw *io.BufBinWriter, nnsHash util.Uint160, pub *keys.PublicKey) (bool, bool, error) {
- isAvail, err := NNSIsAvailable(c.Client, nnsHash, client.NNSGroupKeyName)
- if err != nil {
- return false, false, err
- }
-
- if !isAvail {
- currentPub, err := NNSResolveKey(c.ReadOnlyInvoker, nnsHash, client.NNSGroupKeyName)
- if err != nil {
- return false, false, err
- }
-
- if pub.Equal(currentPub) {
- return true, false, nil
- }
- }
-
- if isAvail {
- emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All,
- client.NNSGroupKeyName, c.CommitteeAcc.Contract.ScriptHash(),
- constants.FrostfsOpsEmail, constants.NNSRefreshDefVal, constants.NNSRetryDefVal,
- int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
- emit.Opcodes(bw.BinWriter, opcode.ASSERT)
- }
-
- emit.AppCall(bw.BinWriter, nnsHash, "deleteRecords", callflag.All, "group.frostfs", int64(nns.TXT))
- emit.AppCall(bw.BinWriter, nnsHash, "addRecord", callflag.All,
- "group.frostfs", int64(nns.TXT), hex.EncodeToString(pub.Bytes()))
-
- return false, isAvail, nil
-}
-
-func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.Uint160, domain string) ([]byte, bool, error) {
- ok, err := NNSIsAvailable(c.Client, nnsHash, domain)
- if err != nil {
- return nil, false, err
- }
-
- if ok {
- bw := io.NewBufBinWriter()
- emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All,
- domain, c.CommitteeAcc.Contract.ScriptHash(),
- constants.FrostfsOpsEmail, constants.NNSRefreshDefVal, constants.NNSRetryDefVal,
- int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
- emit.Opcodes(bw.BinWriter, opcode.ASSERT)
-
- assert.NoError(bw.Err)
- return bw.Bytes(), false, nil
- }
-
- s, err := NNSResolveHash(c.ReadOnlyInvoker, nnsHash, domain)
- if err != nil {
- return nil, false, err
- }
- return nil, s == expectedHash, nil
-}
-
-func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) {
- avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone))
- return !avail, err
-}
-
-func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool {
- r := management.NewReader(c.ReadOnlyInvoker)
- realCs, err := r.GetContract(ctrHash)
- return err == nil && realCs != nil && realCs.NEF.Checksum == cs.NEF.Checksum
-}
-
-func (c *InitializeContext) GetContract(ctrName string) *ContractState {
- return c.Contracts[ctrName]
-}
-
-func (c *InitializeContext) GetAlphabetDeployItems(i, n int) []any {
- items := make([]any, 5)
- items[0] = c.Contracts[constants.NetmapContract].Hash
- items[1] = c.Contracts[constants.ProxyContract].Hash
- items[2] = innerring.GlagoliticLetter(i).String()
- items[3] = int64(i)
- items[4] = int64(n)
- return items
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_test.go
deleted file mode 100644
index f3ce42f51..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package helper
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestNextPollInterval(t *testing.T) {
- var pollInterval time.Duration
- var iteration int
-
- pollInterval, hasChanged := NextPollInterval(iteration, pollInterval)
- require.True(t, hasChanged)
- require.Equal(t, time.Second, pollInterval)
-
- iteration = 4
- pollInterval, hasChanged = NextPollInterval(iteration, pollInterval)
- require.False(t, hasChanged)
- require.Equal(t, time.Second, pollInterval)
-
- iteration = 5
- pollInterval, hasChanged = NextPollInterval(iteration, pollInterval)
- require.True(t, hasChanged)
- require.Equal(t, 2*time.Second, pollInterval)
-
- iteration = 10
- pollInterval, hasChanged = NextPollInterval(iteration, pollInterval)
- require.True(t, hasChanged)
- require.Equal(t, 4*time.Second, pollInterval)
-
- iteration = 20
- pollInterval = 32 * time.Second
- pollInterval, hasChanged = NextPollInterval(iteration, pollInterval)
- require.True(t, hasChanged) // from 32s to 16s
- require.Equal(t, 16*time.Second, pollInterval)
-
- pollInterval = 16 * time.Second
- pollInterval, hasChanged = NextPollInterval(iteration, pollInterval)
- require.False(t, hasChanged)
- require.Equal(t, 16*time.Second, pollInterval)
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
deleted file mode 100644
index 46611c177..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
+++ /dev/null
@@ -1,406 +0,0 @@
-package helper
-
-import (
- "crypto/elliptic"
- "errors"
- "fmt"
- "os"
- "sort"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
- "github.com/google/uuid"
- "github.com/nspcc-dev/neo-go/pkg/config"
- "github.com/nspcc-dev/neo-go/pkg/core"
- "github.com/nspcc-dev/neo-go/pkg/core/block"
- "github.com/nspcc-dev/neo-go/pkg/core/chaindump"
- "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/core/storage"
- "github.com/nspcc-dev/neo-go/pkg/core/transaction"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm"
- "github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
- "go.uber.org/zap"
-)
-
-type LocalClient struct {
- bc *core.Blockchain
- transactions []*transaction.Transaction
- dumpPath string
- accounts []*wallet.Account
-}
-
-func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*LocalClient, error) {
- cfg, err := config.LoadFile(v.GetString(commonflags.ProtoConfigPath))
- if err != nil {
- return nil, err
- }
-
- bc, err := core.NewBlockchain(storage.NewMemoryStore(), cfg.Blockchain(), zap.NewNop())
- if err != nil {
- return nil, err
- }
-
- go bc.Run()
-
- accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets)
- if err != nil {
- return nil, err
- }
-
- if cmd.Name() != "init" {
- if err := restoreDump(bc, dumpPath); err != nil {
- return nil, fmt.Errorf("restore dump: %w", err)
- }
- }
-
- return &LocalClient{
- bc: bc,
- dumpPath: dumpPath,
- accounts: accounts,
- }, nil
-}
-
-func restoreDump(bc *core.Blockchain, dumpPath string) error {
- f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
- if err != nil {
- return fmt.Errorf("can't open local dump: %w", err)
- }
- defer f.Close()
-
- r := io.NewBinReaderFromIO(f)
-
- var skip uint32
- if bc.BlockHeight() != 0 {
- skip = bc.BlockHeight() + 1
- }
-
- count := r.ReadU32LE() - skip
- if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
- return err
- }
- return nil
-}
-
-func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) {
- accounts := make([]*wallet.Account, len(wallets))
- for i := range accounts {
- acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName)
- if err != nil {
- return nil, err
- }
- accounts[i] = acc
- }
-
- indexMap := make(map[string]int)
- for i, pub := range cfg.StandbyCommittee {
- indexMap[pub] = i
- }
-
- sort.Slice(accounts, func(i, j int) bool {
- pi := accounts[i].PrivateKey().PublicKey().Bytes()
- pj := accounts[j].PrivateKey().PublicKey().Bytes()
- return indexMap[string(pi)] < indexMap[string(pj)]
- })
- sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool {
- return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
- })
-
- m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount))
- return accounts[:m], nil
-}
-
-func (l *LocalClient) GetBlockCount() (uint32, error) {
- return l.bc.BlockHeight(), nil
-}
-
-func (l *LocalClient) GetNativeContracts() ([]state.Contract, error) {
- return l.bc.GetNatives(), nil
-}
-
-func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*result.ApplicationLog, error) {
- aer, err := l.bc.GetAppExecResults(h, *t)
- if err != nil {
- return nil, err
- }
-
- a := result.NewApplicationLog(h, aer, *t)
- return &a, nil
-}
-
-// InvokeFunction is implemented via `InvokeScript`.
-func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) {
- var err error
-
- pp := make([]any, len(sPrm))
- for i, p := range sPrm {
- pp[i], err = smartcontract.ExpandParameterToEmitable(p)
- if err != nil {
- return nil, fmt.Errorf("incorrect parameter type %s: %w", p.Type, err)
- }
- }
-
- return InvokeFunction(l, h, method, pp, ss)
-}
-
-func (l *LocalClient) TerminateSession(_ uuid.UUID) (bool, error) {
- // not used by `morph init` command
- panic("unexpected call")
-}
-
-func (l *LocalClient) TraverseIterator(_, _ uuid.UUID, _ int) ([]stackitem.Item, error) {
- // not used by `morph init` command
- panic("unexpected call")
-}
-
-// GetVersion return default version.
-func (l *LocalClient) GetVersion() (*result.Version, error) {
- c := l.bc.GetConfig()
- return &result.Version{
- Protocol: result.Protocol{
- AddressVersion: address.NEO3Prefix,
- Network: c.Magic,
- MillisecondsPerBlock: int(c.TimePerBlock / time.Millisecond),
- MaxTraceableBlocks: c.MaxTraceableBlocks,
- MaxValidUntilBlockIncrement: c.MaxValidUntilBlockIncrement,
- MaxTransactionsPerBlock: c.MaxTransactionsPerBlock,
- MemoryPoolMaxTransactions: c.MemPoolSize,
- ValidatorsCount: byte(c.ValidatorsCount),
- InitialGasDistribution: c.InitialGASSupply,
- CommitteeHistory: c.CommitteeHistory,
- P2PSigExtensions: c.P2PSigExtensions,
- StateRootInHeader: c.StateRootInHeader,
- ValidatorsHistory: c.ValidatorsHistory,
- },
- }, nil
-}
-
-func (l *LocalClient) InvokeContractVerify(util.Uint160, []smartcontract.Parameter, []transaction.Signer, ...transaction.Witness) (*result.Invoke, error) {
- // not used by `morph init` command
- panic("unexpected call")
-}
-
-// CalculateNetworkFee calculates network fee for the given transaction.
-// Copied from neo-go with minor corrections (no need to support non-notary mode):
-// https://github.com/nspcc-dev/neo-go/blob/v0.103.0/pkg/services/rpcsrv/server.go#L911
-func (l *LocalClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, error) {
- // Avoid setting hash for this tx: server code doesn't touch client transaction.
- data := tx.Bytes()
- tx, err := transaction.NewTransactionFromBytes(data)
- if err != nil {
- return 0, err
- }
-
- hashablePart, err := tx.EncodeHashableFields()
- if err != nil {
- return 0, err
- }
- size := len(hashablePart) + io.GetVarSize(len(tx.Signers))
- var (
- netFee int64
- // Verification GAS cost can't exceed this policy.
- gasLimit = l.bc.GetMaxVerificationGAS()
- )
- for i, signer := range tx.Signers {
- w := tx.Scripts[i]
- if len(w.InvocationScript) == 0 { // No invocation provided, try to infer one.
- var paramz []manifest.Parameter
- if len(w.VerificationScript) == 0 { // Contract-based verification
- cs := l.bc.GetContractState(signer.Account)
- if cs == nil {
- return 0, fmt.Errorf("signer %d has no verification script and no deployed contract", i)
- }
- md := cs.Manifest.ABI.GetMethod(manifest.MethodVerify, -1)
- if md == nil || md.ReturnType != smartcontract.BoolType {
- return 0, fmt.Errorf("signer %d has no verify method in deployed contract", i)
- }
- paramz = md.Parameters // Might as well have none params and it's OK.
- } else { // Regular signature verification.
- if vm.IsSignatureContract(w.VerificationScript) {
- paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}}
- } else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok {
- paramz = make([]manifest.Parameter, nSigs)
- for j := range nSigs {
- paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType}
- }
- }
- }
- inv := io.NewBufBinWriter()
- for _, p := range paramz {
- p.Type.EncodeDefaultValue(inv.BinWriter)
- }
- if inv.Err != nil {
- return 0, fmt.Errorf("failed to create dummy invocation script (signer %d): %s", i, inv.Err.Error())
- }
- w.InvocationScript = inv.Bytes()
- }
- gasConsumed, err := l.bc.VerifyWitness(signer.Account, tx, &w, gasLimit)
- if err != nil && !errors.Is(err, core.ErrInvalidSignature) {
- return 0, err
- }
- gasLimit -= gasConsumed
- netFee += gasConsumed
- size += io.GetVarSize(w.VerificationScript) + io.GetVarSize(w.InvocationScript)
- }
- if l.bc.P2PSigExtensionsEnabled() {
- attrs := tx.GetAttributes(transaction.NotaryAssistedT)
- if len(attrs) != 0 {
- na := attrs[0].Value.(*transaction.NotaryAssisted)
- netFee += (int64(na.NKeys) + 1) * l.bc.GetNotaryServiceFeePerKey()
- }
- }
- fee := l.bc.FeePerByte()
- netFee += int64(size) * fee
- return netFee, nil
-}
-
-func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer) (*result.Invoke, error) {
- lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
- if err != nil {
- return nil, err
- }
-
- tx := transaction.New(script, 0)
- tx.Signers = signers
- tx.ValidUntilBlock = l.bc.BlockHeight() + 2
-
- ic, err := l.bc.GetTestVM(trigger.Application, tx, &block.Block{
- Header: block.Header{
- Index: lastBlock.Index + 1,
- Timestamp: lastBlock.Timestamp + 1,
- },
- })
- if err != nil {
- return nil, fmt.Errorf("get test VM: %w", err)
- }
-
- ic.VM.GasLimit = 100_0000_0000
- ic.VM.LoadScriptWithFlags(script, callflag.All)
-
- var errStr string
- if err := ic.VM.Run(); err != nil {
- errStr = err.Error()
- }
- return &result.Invoke{
- State: ic.VM.State().String(),
- GasConsumed: ic.VM.GasConsumed(),
- Script: script,
- Stack: ic.VM.Estack().ToArray(),
- FaultException: errStr,
- }, nil
-}
-
-func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) {
- tx = tx.Copy()
- l.transactions = append(l.transactions, tx)
- return tx.Hash(), nil
-}
-
-func (l *LocalClient) putTransactions() error {
- // 1. Prepare new block.
- lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
- assert.NoError(err)
- defer func() { l.transactions = l.transactions[:0] }()
-
- b := &block.Block{
- Header: block.Header{
- NextConsensus: l.accounts[0].Contract.ScriptHash(),
- Script: transaction.Witness{
- VerificationScript: l.accounts[0].Contract.Script,
- },
- Timestamp: lastBlock.Timestamp + 1,
- },
- Transactions: l.transactions,
- }
-
- if l.bc.GetConfig().StateRootInHeader {
- b.StateRootEnabled = true
- b.PrevStateRoot = l.bc.GetStateModule().CurrentLocalStateRoot()
- }
- b.PrevHash = lastBlock.Hash()
- b.Index = lastBlock.Index + 1
- b.RebuildMerkleRoot()
-
- // 2. Sign prepared block.
- var invocationScript []byte
-
- magic := l.bc.GetConfig().Magic
- for _, acc := range l.accounts {
- sign := acc.PrivateKey().SignHashable(uint32(magic), b)
- invocationScript = append(invocationScript, byte(opcode.PUSHDATA1), 64)
- invocationScript = append(invocationScript, sign...)
- }
- b.Script.InvocationScript = invocationScript
-
- // 3. Persist block.
- return l.bc.AddBlock(b)
-}
-
-func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, signers []transaction.Signer) (*result.Invoke, error) {
- w := io.NewBufBinWriter()
- emit.Array(w.BinWriter, parameters...)
- emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All)
- assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
- return c.InvokeScript(w.Bytes(), signers)
-}
-
-var errGetDesignatedByRoleResponse = errors.New("`getDesignatedByRole`: invalid response")
-
-func GetDesignatedByRole(inv *invoker.Invoker, h util.Uint160, role noderoles.Role, u uint32) (keys.PublicKeys, error) {
- arr, err := unwrap.Array(inv.Call(h, "getDesignatedByRole", int64(role), int64(u)))
- if err != nil {
- return nil, errGetDesignatedByRoleResponse
- }
-
- pubs := make(keys.PublicKeys, len(arr))
- for i := range arr {
- bs, err := arr[i].TryBytes()
- if err != nil {
- return nil, errGetDesignatedByRoleResponse
- }
- pubs[i], err = keys.NewPublicKeyFromBytes(bs, elliptic.P256())
- if err != nil {
- return nil, errGetDesignatedByRoleResponse
- }
- }
-
- return pubs, nil
-}
-
-func (l *LocalClient) Dump() (err error) {
- defer l.bc.Close()
-
- f, err := os.Create(l.dumpPath)
- if err != nil {
- return err
- }
- defer func() {
- closeErr := f.Close()
- if err == nil && closeErr != nil {
- err = closeErr
- }
- }()
-
- w := io.NewBinWriterFromIO(f)
- w.WriteU32LE(l.bc.BlockHeight() + 1)
- err = chaindump.Dump(l.bc, w, 0, l.bc.BlockHeight()+1)
- return
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
deleted file mode 100644
index 20abaff0a..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package helper
-
-import (
- "errors"
- "fmt"
- "slices"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/spf13/viper"
-)
-
-var NetmapConfigKeys = []string{
- netmap.EpochDurationConfig,
- netmap.MaxObjectSizeConfig,
- netmap.ContainerFeeConfig,
- netmap.ContainerAliasFeeConfig,
- netmap.IrCandidateFeeConfig,
- netmap.WithdrawFeeConfig,
- netmap.HomomorphicHashingDisabledKey,
- netmap.MaintenanceModeAllowedConfig,
-}
-
-var errFailedToFetchListOfNetworkKeys = errors.New("can't fetch list of network config keys from the netmap contract")
-
-func GetDefaultNetmapContractConfigMap() map[string]any {
- m := make(map[string]any)
- m[netmap.EpochDurationConfig] = viper.GetInt64(commonflags.EpochDurationInitFlag)
- m[netmap.MaxObjectSizeConfig] = viper.GetInt64(commonflags.MaxObjectSizeInitFlag)
- m[netmap.MaxECDataCountConfig] = viper.GetInt64(commonflags.MaxECDataCountFlag)
- m[netmap.MaxECParityCountConfig] = viper.GetInt64(commonflags.MaxECParityCounFlag)
- m[netmap.ContainerFeeConfig] = viper.GetInt64(commonflags.ContainerFeeInitFlag)
- m[netmap.ContainerAliasFeeConfig] = viper.GetInt64(commonflags.ContainerAliasFeeInitFlag)
- m[netmap.IrCandidateFeeConfig] = viper.GetInt64(commonflags.CandidateFeeInitFlag)
- m[netmap.WithdrawFeeConfig] = viper.GetInt64(commonflags.WithdrawFeeInitFlag)
- m[netmap.HomomorphicHashingDisabledKey] = viper.GetBool(commonflags.HomomorphicHashDisabledInitFlag)
- m[netmap.MaintenanceModeAllowedConfig] = viper.GetBool(commonflags.MaintenanceModeAllowedInitFlag)
- return m
-}
-
-func ParseConfigFromNetmapContract(arr []stackitem.Item) (map[string][]byte, error) {
- m := make(map[string][]byte, len(arr))
- for _, param := range arr {
- tuple, ok := param.Value().([]stackitem.Item)
- if !ok || len(tuple) != 2 {
- return nil, errors.New("invalid ListConfig response from netmap contract")
- }
-
- k, err := tuple[0].TryBytes()
- if err != nil {
- return nil, errors.New("invalid config key from netmap contract")
- }
-
- v, err := tuple[1].TryBytes()
- if err != nil {
- return nil, InvalidConfigValueErr(string(k))
- }
- m[string(k)] = v
- }
- return m, nil
-}
-
-func InvalidConfigValueErr(key string) error {
- return fmt.Errorf("invalid %s config value from netmap contract", key)
-}
-
-func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160, countEpoch int64) error {
- if countEpoch <= 0 {
- return errors.New("number of epochs cannot be less than 1")
- }
-
- curr, err := unwrap.Int64(wCtx.ReadOnlyInvoker.Call(nmHash, "epoch"))
- if err != nil {
- return errors.New("can't fetch current epoch from the netmap contract")
- }
-
- newEpoch := curr + countEpoch
- wCtx.Command.Printf("Current epoch: %d, increase to %d.\n", curr, newEpoch)
-
- // In NeoFS this is done via Notary contract. Here, however, we can form the
- // transaction locally.
- emit.AppCall(bw.BinWriter, nmHash, "newEpoch", callflag.All, newEpoch)
- return bw.Err
-}
-
-func GetNetConfigFromNetmapContract(roInvoker *invoker.Invoker) ([]stackitem.Item, error) {
- r := management.NewReader(roInvoker)
- cs, err := GetContractByID(r, 1)
- if err != nil {
- return nil, fmt.Errorf("get nns contract: %w", err)
- }
- nmHash, err := NNSResolveHash(roInvoker, cs.Hash, DomainOf(constants.NetmapContract))
- if err != nil {
- return nil, fmt.Errorf("can't get netmap contract hash: %w", err)
- }
- arr, err := unwrap.Array(roInvoker.Call(nmHash, "listConfig"))
- if err != nil {
- return nil, errFailedToFetchListOfNetworkKeys
- }
- return arr, err
-}
-
-func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
- arr, err := GetNetConfigFromNetmapContract(roInvoker)
- if err != nil {
- return err
- }
- m, err := ParseConfigFromNetmapContract(arr)
- if err != nil {
- return err
- }
- for k, v := range m {
- if slices.Contains(NetmapConfigKeys, k) {
- md[k] = v
- }
- }
- return nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go
deleted file mode 100644
index be6b2c6dd..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package helper
-
-import (
- "archive/tar"
- "compress/gzip"
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
- "github.com/nspcc-dev/neo-go/cli/input"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/viper"
-)
-
-func getRegularWallet(walletPath string) (*wallet.Wallet, error) {
- w, err := wallet.NewWalletFromFile(walletPath)
- if err != nil {
- return nil, err
- }
-
- password, err := input.ReadPassword("Enter password for wallet:")
- if err != nil {
- return nil, fmt.Errorf("can't fetch password: %w", err)
- }
-
- for i := range w.Accounts {
- if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
- err = fmt.Errorf("can't unlock wallet: %w", err)
- break
- }
- }
-
- return w, err
-}
-
-func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) {
- wallets, err := openAlphabetWallets(v, walletDir)
- if err != nil {
- return nil, err
- }
-
- if len(wallets) > constants.MaxAlphabetNodes {
- return nil, ErrTooManyAlphabetNodes
- }
- return wallets, nil
-}
-
-func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) {
- walletFiles, err := os.ReadDir(walletDir)
- if err != nil {
- return nil, fmt.Errorf("can't read alphabet wallets dir: %w", err)
- }
-
- var wallets []*wallet.Wallet
- var letter string
- for i := range constants.MaxAlphabetNodes {
- letter = innerring.GlagoliticLetter(i).String()
- p := filepath.Join(walletDir, letter+".json")
- var w *wallet.Wallet
- w, err = wallet.NewWalletFromFile(p)
- if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- err = nil
- } else {
- err = fmt.Errorf("can't open alphabet wallet: %w", err)
- }
- break
- }
-
- var password string
- password, err = config.GetPassword(v, letter)
- if err != nil {
- err = fmt.Errorf("can't fetch password: %w", err)
- break
- }
-
- for i := range w.Accounts {
- if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
- err = fmt.Errorf("can't unlock wallet: %w", err)
- break
- }
- }
-
- wallets = append(wallets, w)
- }
- if err != nil {
- return nil, fmt.Errorf("can't read wallet for letter '%s': %w", letter, err)
- }
- if len(wallets) == 0 {
- err = errors.New("there are no alphabet wallets in dir (run `generate-alphabet` command first)")
- if len(walletFiles) > 0 {
- err = fmt.Errorf("use glagolitic names for wallets(run `print-alphabet`): %w", err)
- }
- return nil, err
- }
- return wallets, nil
-}
-
-func ReadContract(ctrPath, ctrName string) (*ContractState, error) {
- rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef"))
- if err != nil {
- return nil, fmt.Errorf("can't read NEF file for %s contract: %w", ctrName, err)
- }
- rawManif, err := os.ReadFile(filepath.Join(ctrPath, "config.json"))
- if err != nil {
- return nil, fmt.Errorf("can't read manifest file for %s contract: %w", ctrName, err)
- }
-
- cs := &ContractState{
- RawNEF: rawNef,
- RawManifest: rawManif,
- }
-
- return cs, cs.Parse()
-}
-
-func readContractsFromArchive(file io.Reader, names []string) (map[string]*ContractState, error) {
- m := make(map[string]*ContractState, len(names))
- for i := range names {
- m[names[i]] = new(ContractState)
- }
-
- gr, err := gzip.NewReader(file)
- if err != nil {
- return nil, fmt.Errorf("contracts file must be tar.gz archive: %w", err)
- }
-
- r := tar.NewReader(gr)
- var h *tar.Header
- for h, err = r.Next(); err == nil && h != nil; h, err = r.Next() {
- if h.Typeflag != tar.TypeReg {
- continue
- }
- dir, _ := filepath.Split(h.Name)
- ctrName := filepath.Base(dir)
-
- cs, ok := m[ctrName]
- if !ok {
- continue
- }
-
- switch {
- case strings.HasSuffix(h.Name, filepath.Join(ctrName, ctrName+"_contract.nef")):
- cs.RawNEF, err = io.ReadAll(r)
- if err != nil {
- return nil, fmt.Errorf("can't read NEF file for %s contract: %w", ctrName, err)
- }
- case strings.HasSuffix(h.Name, "config.json"):
- cs.RawManifest, err = io.ReadAll(r)
- if err != nil {
- return nil, fmt.Errorf("can't read manifest file for %s contract: %w", ctrName, err)
- }
- }
- m[ctrName] = cs
- }
- if err != nil && err != io.EOF {
- return nil, fmt.Errorf("can't read contracts from archive: %w", err)
- }
-
- for ctrName, cs := range m {
- if cs.RawNEF == nil {
- return nil, fmt.Errorf("NEF for %s contract wasn't found", ctrName)
- }
- if cs.RawManifest == nil {
- return nil, fmt.Errorf("manifest for %s contract wasn't found", ctrName)
- }
- }
- return m, nil
-}
-
-func GetAlphabetNNSDomain(i int) string {
- return constants.AlphabetContract + strconv.FormatUint(uint64(i), 10) + ".frostfs"
-}
-
-func ParseGASAmount(s string) (fixedn.Fixed8, error) {
- gasAmount, err := fixedn.Fixed8FromString(s)
- if err != nil {
- return 0, fmt.Errorf("invalid GAS amount %s: %w", s, err)
- }
- if gasAmount <= 0 {
- return 0, fmt.Errorf("GAS amount must be positive (got %d)", gasAmount)
- }
- return gasAmount, nil
-}
-
-// GetContractByID retrieves a contract by its ID using the standard GetContractByID method.
-// However, if the returned state.Contract is nil, it returns an error indicating that the contract was not found.
-// See https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1210
-func GetContractByID(r *management.ContractReader, id int32) (*state.Contract, error) {
- cs, err := r.GetContractByID(id)
- if err != nil {
- return nil, err
- }
-
- if cs == nil {
- return nil, errors.New("contract not found")
- }
- return cs, nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/wallet.go b/cmd/frostfs-adm/internal/modules/morph/helper/wallet.go
deleted file mode 100644
index bd01cd59e..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/helper/wallet.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package helper
-
-import (
- "fmt"
- "os"
- "path/filepath"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-func InitializeContractWallet(v *viper.Viper, walletDir string) (*wallet.Wallet, error) {
- password, err := config.GetPassword(v, constants.ContractWalletPasswordKey)
- if err != nil {
- return nil, err
- }
-
- w, err := wallet.NewWallet(filepath.Join(walletDir, constants.ContractWalletFilename))
- if err != nil {
- return nil, err
- }
-
- acc, err := wallet.NewAccount()
- if err != nil {
- return nil, err
- }
-
- err = acc.Encrypt(password, keys.NEP2ScryptParams())
- if err != nil {
- return nil, err
- }
-
- w.AddAccount(acc)
- if err := w.SavePretty(); err != nil {
- return nil, err
- }
-
- return w, nil
-}
-
-func openContractWallet(v *viper.Viper, cmd *cobra.Command, walletDir string) (*wallet.Wallet, error) {
- p := filepath.Join(walletDir, constants.ContractWalletFilename)
- w, err := wallet.NewWalletFromFile(p)
- if err != nil {
- if !os.IsNotExist(err) {
- return nil, fmt.Errorf("can't open wallet: %w", err)
- }
-
- cmd.Printf("Contract group wallet is missing, initialize at %s\n", p)
- return InitializeContractWallet(v, walletDir)
- }
-
- password, err := config.GetPassword(v, constants.ContractWalletPasswordKey)
- if err != nil {
- return nil, err
- }
-
- for i := range w.Accounts {
- if err := w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
- return nil, fmt.Errorf("can't unlock wallet: %w", err)
- }
- }
-
- return w, nil
-}
-
-func getWallet(cmd *cobra.Command, v *viper.Viper, needContracts bool, walletDir string) (*wallet.Wallet, error) {
- if !needContracts {
- return nil, nil
- }
- return openContractWallet(v, cmd, walletDir)
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize.go b/cmd/frostfs-adm/internal/modules/morph/initialize.go
new file mode 100644
index 000000000..dec1fba20
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize.go
@@ -0,0 +1,514 @@
+package morph
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
+ morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+const (
+ // maxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size
+ // of the invocation script.
+ // See: https://github.com/nspcc-dev/neo-go/blob/740488f7f35e367eaa99a71c0a609c315fe2b0fc/pkg/core/transaction/witness.go#L10
+ maxAlphabetNodes = 22
+)
+
+type cache struct {
+ nnsCs *state.Contract
+ groupKey *keys.PublicKey
+}
+
+type initializeContext struct {
+ clientContext
+ cache
+ // CommitteeAcc is used for retrieving the committee address and the verification script.
+ CommitteeAcc *wallet.Account
+ // ConsensusAcc is used for retrieving the consensus address and the verification script.
+ ConsensusAcc *wallet.Account
+ Wallets []*wallet.Wallet
+ // ContractWallet is a wallet for providing the contract group signature.
+ ContractWallet *wallet.Wallet
+ // Accounts contains simple signature accounts in the same order as in Wallets.
+ Accounts []*wallet.Account
+ Contracts map[string]*contractState
+ Command *cobra.Command
+ ContractPath string
+}
+
+var ErrTooManyAlphabetNodes = fmt.Errorf("too many alphabet nodes (maximum allowed is %d)", maxAlphabetNodes)
+
+func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
+ initCtx, err := newInitializeContext(cmd, viper.GetViper())
+ if err != nil {
+ return fmt.Errorf("initialization error: %w", err)
+ }
+ defer initCtx.close()
+
+ // 1. Transfer funds to committee accounts.
+ cmd.Println("Stage 1: transfer GAS to alphabet nodes.")
+ if err := initCtx.transferFunds(); err != nil {
+ return err
+ }
+
+ cmd.Println("Stage 2: set notary and alphabet nodes in designate contract.")
+ if err := initCtx.setNotaryAndAlphabetNodes(); err != nil {
+ return err
+ }
+
+ // 3. Deploy NNS contract.
+ cmd.Println("Stage 3: deploy NNS contract.")
+ if err := initCtx.deployNNS(deployMethodName); err != nil {
+ return err
+ }
+
+ // 4. Deploy NeoFS contracts.
+ cmd.Println("Stage 4: deploy NeoFS contracts.")
+ if err := initCtx.deployContracts(); err != nil {
+ return err
+ }
+
+ cmd.Println("Stage 4.1: Transfer GAS to proxy contract.")
+ if err := initCtx.transferGASToProxy(); err != nil {
+ return err
+ }
+
+ cmd.Println("Stage 5: register candidates.")
+ if err := initCtx.registerCandidates(); err != nil {
+ return err
+ }
+
+ cmd.Println("Stage 6: transfer NEO to alphabet contracts.")
+ if err := initCtx.transferNEOToAlphabetContracts(); err != nil {
+ return err
+ }
+
+ cmd.Println("Stage 7: set addresses in NNS.")
+ return initCtx.setNNS()
+}
+
+func (c *initializeContext) close() {
+ if local, ok := c.Client.(*localClient); ok {
+ err := local.dump()
+ if err != nil {
+ c.Command.PrintErrf("Can't write dump: %v\n", err)
+ os.Exit(1)
+ }
+ }
+}
+
+func newInitializeContext(cmd *cobra.Command, v *viper.Viper) (*initializeContext, error) {
+ walletDir := config.ResolveHomePath(viper.GetString(alphabetWalletsFlag))
+ wallets, err := openAlphabetWallets(v, walletDir)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(wallets) > maxAlphabetNodes {
+ return nil, ErrTooManyAlphabetNodes
+ }
+
+ needContracts := cmd.Name() == "update-contracts" || cmd.Name() == "init"
+
+ var w *wallet.Wallet
+ w, err = getWallet(cmd, v, needContracts, walletDir)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := createClient(cmd, v, wallets)
+ if err != nil {
+ return nil, err
+ }
+
+ committeeAcc, err := getWalletAccount(wallets[0], committeeAccountName)
+ if err != nil {
+ return nil, fmt.Errorf("can't find committee account: %w", err)
+ }
+
+ consensusAcc, err := getWalletAccount(wallets[0], consensusAccountName)
+ if err != nil {
+ return nil, fmt.Errorf("can't find consensus account: %w", err)
+ }
+
+ if err := validateInit(cmd); err != nil {
+ return nil, err
+ }
+
+ ctrPath, err := getContractsPath(cmd, needContracts)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := checkNotaryEnabled(c); err != nil {
+ return nil, err
+ }
+
+ accounts, err := createWalletAccounts(wallets)
+ if err != nil {
+ return nil, err
+ }
+
+ cliCtx, err := defaultClientContext(c, committeeAcc)
+ if err != nil {
+ return nil, fmt.Errorf("client context: %w", err)
+ }
+
+ initCtx := &initializeContext{
+ clientContext: *cliCtx,
+ ConsensusAcc: consensusAcc,
+ CommitteeAcc: committeeAcc,
+ ContractWallet: w,
+ Wallets: wallets,
+ Accounts: accounts,
+ Command: cmd,
+ Contracts: make(map[string]*contractState),
+ ContractPath: ctrPath,
+ }
+
+ if needContracts {
+ err := initCtx.readContracts(fullContractList)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return initCtx, nil
+}
+
+func validateInit(cmd *cobra.Command) error {
+ if cmd.Name() != "init" {
+ return nil
+ }
+ if viper.GetInt64(epochDurationInitFlag) <= 0 {
+ return fmt.Errorf("epoch duration must be positive")
+ }
+
+ if viper.GetInt64(maxObjectSizeInitFlag) <= 0 {
+ return fmt.Errorf("max object size must be positive")
+ }
+
+ return nil
+}
+
+func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet) (Client, error) {
+ var c Client
+ var err error
+ if ldf := cmd.Flags().Lookup(localDumpFlag); ldf != nil && ldf.Changed {
+ if cmd.Flags().Changed(endpointFlag) {
+ return nil, fmt.Errorf("`%s` and `%s` flags are mutually exclusive", endpointFlag, localDumpFlag)
+ }
+ c, err = newLocalClient(cmd, v, wallets, ldf.Value.String())
+ } else {
+ c, err = getN3Client(v)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("can't create N3 client: %w", err)
+ }
+ return c, nil
+}
+
+func getWallet(cmd *cobra.Command, v *viper.Viper, needContracts bool, walletDir string) (*wallet.Wallet, error) {
+ if !needContracts {
+ return nil, nil
+ }
+ return openContractWallet(v, cmd, walletDir)
+}
+
+func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) {
+ if !needContracts {
+ return "", nil
+ }
+
+ ctrPath, err := cmd.Flags().GetString(contractsInitFlag)
+ if err != nil {
+ return "", fmt.Errorf("invalid contracts path: %w", err)
+ }
+ return ctrPath, nil
+}
+
+func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
+ accounts := make([]*wallet.Account, len(wallets))
+ for i, w := range wallets {
+ acc, err := getWalletAccount(w, singleAccountName)
+ if err != nil {
+ return nil, fmt.Errorf("wallet %s is invalid (no single account): %w", w.Path(), err)
+ }
+ accounts[i] = acc
+ }
+ return accounts, nil
+}
+
+func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) {
+ walletFiles, err := os.ReadDir(walletDir)
+ if err != nil {
+ return nil, fmt.Errorf("can't read alphabet wallets dir: %w", err)
+ }
+
+ var size int
+loop:
+ for i := 0; i < len(walletFiles); i++ {
+ name := innerring.GlagoliticLetter(i).String() + ".json"
+ for j := range walletFiles {
+ if walletFiles[j].Name() == name {
+ size++
+ continue loop
+ }
+ }
+ break
+ }
+ if size == 0 {
+ return nil, errors.New("alphabet wallets dir is empty (run `generate-alphabet` command first)")
+ }
+
+ wallets := make([]*wallet.Wallet, size)
+ for i := 0; i < size; i++ {
+ letter := innerring.GlagoliticLetter(i).String()
+ p := filepath.Join(walletDir, letter+".json")
+ w, err := wallet.NewWalletFromFile(p)
+ if err != nil {
+ return nil, fmt.Errorf("can't open wallet: %w", err)
+ }
+
+ password, err := config.GetPassword(v, letter)
+ if err != nil {
+ return nil, fmt.Errorf("can't fetch password: %w", err)
+ }
+
+ for i := range w.Accounts {
+ if err := w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
+ return nil, fmt.Errorf("can't unlock wallet: %w", err)
+ }
+ }
+
+ wallets[i] = w
+ }
+
+ return wallets, nil
+}
+
+func (c *initializeContext) awaitTx() error {
+ return c.clientContext.awaitTx(c.Command)
+}
+
+func (c *initializeContext) nnsContractState() (*state.Contract, error) {
+ if c.nnsCs != nil {
+ return c.nnsCs, nil
+ }
+
+ cs, err := c.Client.GetContractStateByID(1)
+ if err != nil {
+ return nil, err
+ }
+
+ c.nnsCs = cs
+ return cs, nil
+}
+
+func (c *initializeContext) getSigner(tryGroup bool, acc *wallet.Account) transaction.Signer {
+ if tryGroup && c.groupKey != nil {
+ return transaction.Signer{
+ Account: acc.Contract.ScriptHash(),
+ Scopes: transaction.CustomGroups,
+ AllowedGroups: keys.PublicKeys{c.groupKey},
+ }
+ }
+
+ signer := transaction.Signer{
+ Account: acc.Contract.ScriptHash(),
+ Scopes: transaction.Global, // Scope is important, as we have nested call to container contract.
+ }
+
+ if !tryGroup {
+ return signer
+ }
+
+ nnsCs, err := c.nnsContractState()
+ if err != nil {
+ return signer
+ }
+
+ groupKey, err := nnsResolveKey(c.ReadOnlyInvoker, nnsCs.Hash, morphClient.NNSGroupKeyName)
+ if err == nil {
+ c.groupKey = groupKey
+
+ signer.Scopes = transaction.CustomGroups
+ signer.AllowedGroups = keys.PublicKeys{groupKey}
+ }
+ return signer
+}
+
+func (c *clientContext) awaitTx(cmd *cobra.Command) error {
+ if len(c.SentTxs) == 0 {
+ return nil
+ }
+
+ if local, ok := c.Client.(*localClient); ok {
+ if err := local.putTransactions(); err != nil {
+ return fmt.Errorf("can't persist transactions: %w", err)
+ }
+ }
+
+ err := awaitTx(cmd, c.Client, c.SentTxs)
+ c.SentTxs = c.SentTxs[:0]
+
+ return err
+}
+
+func awaitTx(cmd *cobra.Command, c Client, txs []hashVUBPair) error {
+ cmd.Println("Waiting for transactions to persist...")
+
+ const pollInterval = time.Second
+
+ tick := time.NewTicker(pollInterval)
+ defer tick.Stop()
+
+ at := trigger.Application
+
+ var retErr error
+
+ currBlock, err := c.GetBlockCount()
+ if err != nil {
+ return fmt.Errorf("can't fetch current block height: %w", err)
+ }
+
+loop:
+ for i := range txs {
+ res, err := c.GetApplicationLog(txs[i].hash, &at)
+ if err == nil {
+ if retErr == nil && len(res.Executions) > 0 && res.Executions[0].VMState != vmstate.Halt {
+ retErr = fmt.Errorf("tx %d persisted in %s state: %s",
+ i, res.Executions[0].VMState, res.Executions[0].FaultException)
+ }
+ continue loop
+ }
+ if txs[i].vub < currBlock {
+ return fmt.Errorf("tx was not persisted: vub=%d, height=%d", txs[i].vub, currBlock)
+ }
+ for range tick.C {
+ // We must fetch current height before application log, to avoid race condition.
+ currBlock, err = c.GetBlockCount()
+ if err != nil {
+ return fmt.Errorf("can't fetch current block height: %w", err)
+ }
+ res, err := c.GetApplicationLog(txs[i].hash, &at)
+ if err == nil {
+ if retErr == nil && len(res.Executions) > 0 && res.Executions[0].VMState != vmstate.Halt {
+ retErr = fmt.Errorf("tx %d persisted in %s state: %s",
+ i, res.Executions[0].VMState, res.Executions[0].FaultException)
+ }
+ continue loop
+ }
+ if txs[i].vub < currBlock {
+ return fmt.Errorf("tx was not persisted: vub=%d, height=%d", txs[i].vub, currBlock)
+ }
+ }
+ }
+
+ return retErr
+}
+
+// sendCommitteeTx creates transaction from script, signs it by committee nodes and sends it to RPC.
+// If tryGroup is false, global scope is used for the signer (useful when
+// working with native contracts).
+func (c *initializeContext) sendCommitteeTx(script []byte, tryGroup bool) error {
+ return c.sendMultiTx(script, tryGroup, false)
+}
+
+// sendConsensusTx creates transaction from script, signs it by alphabet nodes and sends it to RPC.
+// Note that because this is used only after the contracts were initialized and deployed,
+// we always try to have a group scope.
+func (c *initializeContext) sendConsensusTx(script []byte) error {
+ return c.sendMultiTx(script, true, true)
+}
+
+func (c *initializeContext) sendMultiTx(script []byte, tryGroup bool, withConsensus bool) error {
+ var act *actor.Actor
+ var err error
+
+ withConsensus = withConsensus && !c.ConsensusAcc.Contract.ScriptHash().Equals(c.CommitteeAcc.ScriptHash())
+ if tryGroup {
+ // Even for consensus signatures we need the committee to pay.
+ signers := make([]actor.SignerAccount, 1, 2)
+ signers[0] = actor.SignerAccount{
+ Signer: c.getSigner(tryGroup, c.CommitteeAcc),
+ Account: c.CommitteeAcc,
+ }
+ if withConsensus {
+ signers = append(signers, actor.SignerAccount{
+ Signer: c.getSigner(tryGroup, c.ConsensusAcc),
+ Account: c.ConsensusAcc,
+ })
+ }
+ act, err = actor.New(c.Client, signers)
+ } else {
+ if withConsensus {
+ panic("BUG: should never happen")
+ }
+ act, err = c.CommitteeAct, nil
+ }
+ if err != nil {
+ return fmt.Errorf("could not create actor: %w", err)
+ }
+
+ tx, err := act.MakeUnsignedRun(script, []transaction.Attribute{{Type: transaction.HighPriority}})
+ if err != nil {
+ return fmt.Errorf("could not perform test invocation: %w", err)
+ }
+
+ if err := c.multiSign(tx, committeeAccountName); err != nil {
+ return err
+ }
+ if withConsensus {
+ if err := c.multiSign(tx, consensusAccountName); err != nil {
+ return err
+ }
+ }
+
+ return c.sendTx(tx, c.Command, false)
+}
+
+func getWalletAccount(w *wallet.Wallet, typ string) (*wallet.Account, error) {
+ for i := range w.Accounts {
+ if w.Accounts[i].Label == typ {
+ return w.Accounts[i], nil
+ }
+ }
+ return nil, fmt.Errorf("account for '%s' not found", typ)
+}
+
+func checkNotaryEnabled(c Client) error {
+ ns, err := c.GetNativeContracts()
+ if err != nil {
+ return fmt.Errorf("can't get native contract hashes: %w", err)
+ }
+
+ notaryEnabled := false
+ nativeHashes := make(map[string]util.Uint160, len(ns))
+ for i := range ns {
+ if ns[i].Manifest.Name == nativenames.Notary {
+ notaryEnabled = len(ns[i].UpdateHistory) > 0
+ }
+ nativeHashes[ns[i].Manifest.Name] = ns[i].Hash
+ }
+ if !notaryEnabled {
+ return errors.New("notary contract must be enabled")
+ }
+ return nil
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go
deleted file mode 100644
index cdaf7d3bc..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package initialize
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
- initCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
- if err != nil {
- return fmt.Errorf("initialization error: %w", err)
- }
- defer initCtx.Close()
-
- // 1. Transfer funds to committee accounts.
- cmd.Println("Stage 1: transfer GAS to alphabet nodes.")
- if err := transferFunds(initCtx); err != nil {
- return err
- }
-
- cmd.Println("Stage 2: set notary and alphabet nodes in designate contract.")
- if err := setNotaryAndAlphabetNodes(initCtx); err != nil {
- return err
- }
-
- // 3. Deploy NNS contract.
- cmd.Println("Stage 3: deploy NNS contract.")
- if err := helper.DeployNNS(initCtx, constants.DeployMethodName); err != nil {
- return err
- }
-
- // 4. Deploy NeoFS contracts.
- cmd.Println("Stage 4: deploy NeoFS contracts.")
- if err := deployContracts(initCtx); err != nil {
- return err
- }
-
- cmd.Println("Stage 4.1: Transfer GAS to proxy contract.")
- if err := transferGASToProxy(initCtx); err != nil {
- return err
- }
-
- cmd.Println("Stage 5: register candidates.")
- if err := registerCandidates(initCtx); err != nil {
- return err
- }
-
- cmd.Println("Stage 6: transfer NEO to alphabet contracts.")
- if err := transferNEOToAlphabetContracts(initCtx); err != nil {
- return err
- }
-
- cmd.Println("Stage 7: set addresses in NNS.")
- return setNNS(initCtx)
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_deploy.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_deploy.go
deleted file mode 100644
index f40ea732c..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_deploy.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package initialize
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
-)
-
-func deployContracts(c *helper.InitializeContext) error {
- alphaCs := c.GetContract(constants.AlphabetContract)
-
- var keysParam []any
-
- baseGroups := alphaCs.Manifest.Groups
-
- // alphabet contracts should be deployed by individual nodes to get different hashes.
- for i, acc := range c.Accounts {
- ctrHash := state.CreateContractHash(acc.Contract.ScriptHash(), alphaCs.NEF.Checksum, alphaCs.Manifest.Name)
- if c.IsUpdated(ctrHash, alphaCs) {
- c.Command.Printf("Alphabet contract #%d is already deployed.\n", i)
- continue
- }
-
- alphaCs.Manifest.Groups = baseGroups
- err := helper.AddManifestGroup(c.ContractWallet, ctrHash, alphaCs)
- if err != nil {
- return fmt.Errorf("can't sign manifest group: %v", err)
- }
-
- keysParam = append(keysParam, acc.PrivateKey().PublicKey().Bytes())
- params := helper.GetContractDeployParameters(alphaCs, c.GetAlphabetDeployItems(i, len(c.Wallets)))
-
- act, err := actor.NewSimple(c.Client, acc)
- if err != nil {
- return fmt.Errorf("could not create actor: %w", err)
- }
-
- txHash, vub, err := act.SendCall(management.Hash, constants.DeployMethodName, params...)
- if err != nil {
- return fmt.Errorf("can't deploy alphabet #%d contract: %w", i, err)
- }
-
- c.SentTxs = append(c.SentTxs, helper.HashVUBPair{Hash: txHash, Vub: vub})
- }
-
- for _, ctrName := range constants.ContractList {
- cs := c.GetContract(ctrName)
-
- ctrHash := cs.Hash
- if c.IsUpdated(ctrHash, cs) {
- c.Command.Printf("%s contract is already deployed.\n", ctrName)
- continue
- }
-
- err := helper.AddManifestGroup(c.ContractWallet, ctrHash, cs)
- if err != nil {
- return fmt.Errorf("can't sign manifest group: %v", err)
- }
-
- args, err := helper.GetContractDeployData(c, ctrName, keysParam, constants.DeployMethodName)
- if err != nil {
- return fmt.Errorf("%s: getting deploy params: %v", ctrName, err)
- }
- params := helper.GetContractDeployParameters(cs, args)
- res, err := c.CommitteeAct.MakeCall(management.Hash, constants.DeployMethodName, params...)
- if err != nil {
- return fmt.Errorf("can't deploy %s contract: %w", ctrName, err)
- }
-
- if err := c.SendCommitteeTx(res.Script, false); err != nil {
- return err
- }
- }
-
- return c.AwaitTx()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go
deleted file mode 100644
index 176356378..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package initialize
-
-import (
- "encoding/hex"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
- morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
-)
-
-func setNNS(c *helper.InitializeContext) error {
- r := management.NewReader(c.ReadOnlyInvoker)
- nnsCs, err := helper.GetContractByID(r, 1)
- if err != nil {
- return err
- }
-
- ok, err := c.NNSRootRegistered(nnsCs.Hash, "frostfs")
- if err != nil {
- return err
- } else if !ok {
- bw := io.NewBufBinWriter()
- emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
- "frostfs", c.CommitteeAcc.Contract.ScriptHash(),
- constants.FrostfsOpsEmail, constants.NNSRefreshDefVal, constants.NNSRetryDefVal,
- int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
- emit.Opcodes(bw.BinWriter, opcode.ASSERT)
- if err := c.SendCommitteeTx(bw.Bytes(), true); err != nil {
- return fmt.Errorf("can't add domain root to NNS: %w", err)
- }
- if err := c.AwaitTx(); err != nil {
- return err
- }
- }
-
- alphaCs := c.GetContract(constants.AlphabetContract)
- for i, acc := range c.Accounts {
- alphaCs.Hash = state.CreateContractHash(acc.Contract.ScriptHash(), alphaCs.NEF.Checksum, alphaCs.Manifest.Name)
-
- domain := helper.GetAlphabetNNSDomain(i)
- if err := nnsRegisterDomain(c, nnsCs.Hash, alphaCs.Hash, domain); err != nil {
- return err
- }
- c.Command.Printf("NNS: Set %s -> %s\n", domain, alphaCs.Hash.StringLE())
- }
-
- for _, ctrName := range constants.ContractList {
- cs := c.GetContract(ctrName)
-
- domain := ctrName + ".frostfs"
- if err := nnsRegisterDomain(c, nnsCs.Hash, cs.Hash, domain); err != nil {
- return err
- }
- c.Command.Printf("NNS: Set %s -> %s\n", domain, cs.Hash.StringLE())
- }
-
- groupKey := c.ContractWallet.Accounts[0].PrivateKey().PublicKey()
- err = updateNNSGroup(c, nnsCs.Hash, groupKey)
- if err != nil {
- return err
- }
- c.Command.Printf("NNS: Set %s -> %s\n", morphClient.NNSGroupKeyName, hex.EncodeToString(groupKey.Bytes()))
-
- return c.AwaitTx()
-}
-
-func updateNNSGroup(c *helper.InitializeContext, nnsHash util.Uint160, pub *keys.PublicKey) error {
- bw := io.NewBufBinWriter()
- keyAlreadyAdded, domainRegCodeEmitted, err := c.EmitUpdateNNSGroupScript(bw, nnsHash, pub)
- if keyAlreadyAdded || err != nil {
- return err
- }
-
- script := bw.Bytes()
- if domainRegCodeEmitted {
- w := io.NewBufBinWriter()
- emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
- wrapRegisterScriptWithPrice(w, nnsHash, script)
- script = w.Bytes()
- }
-
- return c.SendCommitteeTx(script, true)
-}
-
-// wrapRegisterScriptWithPrice wraps a given script with `getPrice`/`setPrice` calls for NNS.
-// It is intended to be used for a single transaction, and not as a part of other scripts.
-// It is assumed that script already contains static slot initialization code, the first one
-// (with index 0) is used to store the price.
-func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []byte) {
- if len(s) == 0 {
- return
- }
-
- emit.AppCall(w.BinWriter, nnsHash, "getPrice", callflag.All)
- emit.Opcodes(w.BinWriter, opcode.STSFLD0)
- emit.AppCall(w.BinWriter, nnsHash, "setPrice", callflag.All, 1)
-
- w.WriteBytes(s)
-
- emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
- emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
-
- assert.NoError(w.Err, "can't wrap register script")
-}
-
-func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error {
- script, ok, err := c.NNSRegisterDomainScript(nnsHash, expectedHash, domain)
- if ok || err != nil {
- return err
- }
-
- w := io.NewBufBinWriter()
- emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
- wrapRegisterScriptWithPrice(w, nnsHash, script)
-
- emit.AppCall(w.BinWriter, nnsHash, "deleteRecords", callflag.All, domain, int64(nns.TXT))
- emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All,
- domain, int64(nns.TXT), expectedHash.StringLE())
- emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All,
- domain, int64(nns.TXT), address.Uint160ToString(expectedHash))
- return c.SendCommitteeTx(w.Bytes(), true)
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
deleted file mode 100644
index bb684b3a9..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package initialize
-
-import (
- "fmt"
- "math/big"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "github.com/nspcc-dev/neo-go/pkg/core/native"
- "github.com/nspcc-dev/neo-go/pkg/core/transaction"
- "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
-)
-
-const (
- // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
- initialAlphabetGASAmount = 10_000 * native.GASFactor
- // initialProxyGASAmount represents the amount of GAS given to a proxy contract.
- initialProxyGASAmount = 50_000 * native.GASFactor
-)
-
-func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 {
- return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
-}
-
-func transferFunds(c *helper.InitializeContext) error {
- ok, err := transferFundsFinished(c)
- if ok || err != nil {
- if err == nil {
- c.Command.Println("Stage 1: already performed.")
- }
- return err
- }
-
- version, err := c.Client.GetVersion()
- if err != nil {
- return err
- }
-
- var transfers []transferTarget
- for _, acc := range c.Accounts {
- to := acc.Contract.ScriptHash()
- transfers = append(transfers,
- transferTarget{
- Token: gas.Hash,
- Address: to,
- Amount: initialAlphabetGASAmount,
- },
- )
- }
-
- // It is convenient to have all funds at the committee account.
- transfers = append(transfers,
- transferTarget{
- Token: gas.Hash,
- Address: c.CommitteeAcc.Contract.ScriptHash(),
- Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)),
- },
- transferTarget{
- Token: neo.Hash,
- Address: c.CommitteeAcc.Contract.ScriptHash(),
- Amount: native.NEOTotalSupply,
- },
- )
-
- tx, err := createNEP17MultiTransferTx(c.Client, c.ConsensusAcc, transfers)
- if err != nil {
- return fmt.Errorf("can't create transfer transaction: %w", err)
- }
-
- if err := c.MultiSignAndSend(tx, constants.ConsensusAccountName); err != nil {
- return fmt.Errorf("can't send transfer transaction: %w", err)
- }
-
- return c.AwaitTx()
-}
-
-// transferFundsFinished checks balances of accounts we transfer GAS to.
-// The stage is considered finished if the balance is greater than the half of what we need to transfer.
-func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
- r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
- res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash())
- if err != nil {
- return false, err
- }
-
- version, err := c.Client.GetVersion()
- if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 {
- return false, err
- }
-
- res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
- if err != nil {
- return false, err
- }
-
- return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err
-}
-
-func transferGASToProxy(c *helper.InitializeContext) error {
- proxyCs := c.GetContract(constants.ProxyContract)
-
- r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
- bal, err := r.BalanceOf(proxyCs.Hash)
- if err != nil || bal.Sign() > 0 {
- return err
- }
-
- tx, err := createNEP17MultiTransferTx(c.Client, c.CommitteeAcc, []transferTarget{{
- Token: gas.Hash,
- Address: proxyCs.Hash,
- Amount: initialProxyGASAmount,
- }})
- if err != nil {
- return err
- }
-
- if err := c.MultiSignAndSend(tx, constants.CommitteeAccountName); err != nil {
- return err
- }
-
- return c.AwaitTx()
-}
-
-type transferTarget struct {
- Token util.Uint160
- Address util.Uint160
- Amount int64
- Data any
-}
-
-func createNEP17MultiTransferTx(c helper.Client, acc *wallet.Account, recipients []transferTarget) (*transaction.Transaction, error) {
- from := acc.Contract.ScriptHash()
-
- w := io.NewBufBinWriter()
- for i := range recipients {
- emit.AppCall(w.BinWriter, recipients[i].Token, "transfer", callflag.All,
- from, recipients[i].Address, recipients[i].Amount, recipients[i].Data)
- emit.Opcodes(w.BinWriter, opcode.ASSERT)
- }
- if w.Err != nil {
- return nil, fmt.Errorf("failed to create transfer script: %w", w.Err)
- }
-
- signers := []actor.SignerAccount{{
- Signer: transaction.Signer{
- Account: acc.Contract.ScriptHash(),
- Scopes: transaction.CalledByEntry,
- },
- Account: acc,
- }}
-
- act, err := actor.New(c, signers)
- if err != nil {
- return nil, fmt.Errorf("can't create actor: %w", err)
- }
- tx, err := act.MakeRun(w.Bytes())
- if err != nil {
- sum := make(map[util.Uint160]int64)
- for _, recipient := range recipients {
- sum[recipient.Token] += recipient.Amount
- }
- detail := make([]string, 0, len(sum))
- for _, value := range sum {
- detail = append(detail, fmt.Sprintf("amount=%v", value))
- }
- err = fmt.Errorf("transfer failed: from=%s(%s) %s: %w", acc.Label, acc.Address, strings.Join(detail, " "), err)
- }
- return tx, err
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go b/cmd/frostfs-adm/internal/modules/morph/initialize/root.go
deleted file mode 100644
index 50f14e728..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package initialize
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const (
- maxObjectSizeCLIFlag = "max-object-size"
- epochDurationCLIFlag = "epoch-duration"
- containerFeeCLIFlag = "container-fee"
- containerAliasFeeCLIFlag = "container-alias-fee"
- candidateFeeCLIFlag = "candidate-fee"
- homomorphicHashDisabledCLIFlag = "homomorphic-disabled"
- withdrawFeeCLIFlag = "withdraw-fee"
-)
-
-var Cmd = &cobra.Command{
- Use: "init",
- Short: "Initialize side chain network with smart-contracts and network settings",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.EpochDurationInitFlag, cmd.Flags().Lookup(epochDurationCLIFlag))
- _ = viper.BindPFlag(commonflags.MaxObjectSizeInitFlag, cmd.Flags().Lookup(maxObjectSizeCLIFlag))
- _ = viper.BindPFlag(commonflags.MaxECDataCountFlag, cmd.Flags().Lookup(commonflags.MaxECDataCountFlag))
- _ = viper.BindPFlag(commonflags.MaxECParityCounFlag, cmd.Flags().Lookup(commonflags.MaxECParityCounFlag))
- _ = viper.BindPFlag(commonflags.HomomorphicHashDisabledInitFlag, cmd.Flags().Lookup(homomorphicHashDisabledCLIFlag))
- _ = viper.BindPFlag(commonflags.CandidateFeeInitFlag, cmd.Flags().Lookup(candidateFeeCLIFlag))
- _ = viper.BindPFlag(commonflags.ContainerFeeInitFlag, cmd.Flags().Lookup(containerFeeCLIFlag))
- _ = viper.BindPFlag(commonflags.ContainerAliasFeeInitFlag, cmd.Flags().Lookup(containerAliasFeeCLIFlag))
- _ = viper.BindPFlag(commonflags.WithdrawFeeInitFlag, cmd.Flags().Lookup(withdrawFeeCLIFlag))
- _ = viper.BindPFlag(commonflags.ProtoConfigPath, cmd.Flags().Lookup(commonflags.ProtoConfigPath))
- },
- RunE: initializeSideChainCmd,
-}
-
-func initInitCmd() {
- Cmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- Cmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- Cmd.Flags().String(commonflags.ContractsInitFlag, "", commonflags.ContractsInitFlagDesc)
- Cmd.Flags().String(commonflags.ContractsURLFlag, "", commonflags.ContractsURLFlagDesc)
- Cmd.Flags().Uint(epochDurationCLIFlag, 240, "Amount of side chain blocks in one FrostFS epoch")
- Cmd.Flags().Uint(maxObjectSizeCLIFlag, 67108864, "Max single object size in bytes")
- Cmd.Flags().Bool(homomorphicHashDisabledCLIFlag, false, "Disable object homomorphic hashing")
- // Defaults are taken from neo-preodolenie.
- Cmd.Flags().Uint64(containerFeeCLIFlag, 1000, "Container registration fee")
- Cmd.Flags().Uint64(containerAliasFeeCLIFlag, 500, "Container alias fee")
- Cmd.Flags().String(commonflags.ProtoConfigPath, "", "Path to the consensus node configuration")
- Cmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
- Cmd.MarkFlagsMutuallyExclusive(commonflags.ContractsInitFlag, commonflags.ContractsURLFlag)
-}
-
-func init() {
- initInitCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
new file mode 100644
index 000000000..a7cd537b7
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
@@ -0,0 +1,617 @@
+package morph
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
+ morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ io2 "github.com/nspcc-dev/neo-go/pkg/io"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/nef"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/emit"
+ "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
+)
+
+const (
+ nnsContract = "nns"
+ frostfsContract = "frostfs" // not deployed in side-chain.
+ processingContract = "processing" // not deployed in side-chain.
+ alphabetContract = "alphabet"
+ balanceContract = "balance"
+ containerContract = "container"
+ frostfsIDContract = "frostfsid"
+ netmapContract = "netmap"
+ proxyContract = "proxy"
+)
+
+var (
+ contractList = []string{
+ balanceContract,
+ containerContract,
+ frostfsIDContract,
+ netmapContract,
+ proxyContract,
+ }
+
+ fullContractList = append([]string{
+ frostfsContract,
+ processingContract,
+ nnsContract,
+ alphabetContract,
+ }, contractList...)
+
+ netmapConfigKeys = []string{
+ netmap.EpochDurationConfig,
+ netmap.MaxObjectSizeConfig,
+ netmap.ContainerFeeConfig,
+ netmap.ContainerAliasFeeConfig,
+ netmap.IrCandidateFeeConfig,
+ netmap.WithdrawFeeConfig,
+ netmap.HomomorphicHashingDisabledKey,
+ netmap.MaintenanceModeAllowedConfig,
+ }
+)
+
+type contractState struct {
+ NEF *nef.File
+ RawNEF []byte
+ Manifest *manifest.Manifest
+ RawManifest []byte
+ Hash util.Uint160
+}
+
+const (
+ updateMethodName = "update"
+ deployMethodName = "deploy"
+)
+
+func (c *initializeContext) deployNNS(method string) error {
+ cs := c.getContract(nnsContract)
+ h := cs.Hash
+
+ nnsCs, err := c.nnsContractState()
+ if err == nil {
+ if nnsCs.NEF.Checksum == cs.NEF.Checksum {
+ if method == deployMethodName {
+ c.Command.Println("NNS contract is already deployed.")
+ } else {
+ c.Command.Println("NNS contract is already updated.")
+ }
+ return nil
+ }
+ h = nnsCs.Hash
+ }
+
+ err = c.addManifestGroup(h, cs)
+ if err != nil {
+ return fmt.Errorf("can't sign manifest group: %v", err)
+ }
+
+ params := getContractDeployParameters(cs, nil)
+ signer := transaction.Signer{
+ Account: c.CommitteeAcc.Contract.ScriptHash(),
+ Scopes: transaction.CalledByEntry,
+ }
+
+ invokeHash := management.Hash
+ if method == updateMethodName {
+ invokeHash = nnsCs.Hash
+ }
+
+ res, err := invokeFunction(c.Client, invokeHash, method, params, []transaction.Signer{signer})
+ if err != nil {
+ return fmt.Errorf("can't deploy NNS contract: %w", err)
+ }
+ if res.State != vmstate.Halt.String() {
+ return fmt.Errorf("can't deploy NNS contract: %s", res.FaultException)
+ }
+
+ tx, err := c.Client.CreateTxFromScript(res.Script, c.CommitteeAcc, res.GasConsumed, 0, []rpcclient.SignerAccount{{
+ Signer: signer,
+ Account: c.CommitteeAcc,
+ }})
+ if err != nil {
+ return fmt.Errorf("failed to create deploy tx for %s: %w", nnsContract, err)
+ }
+
+ if err := c.multiSignAndSend(tx, committeeAccountName); err != nil {
+ return fmt.Errorf("can't send deploy transaction: %w", err)
+ }
+
+ return c.awaitTx()
+}
+
+func (c *initializeContext) updateContracts() error {
+ alphaCs := c.getContract(alphabetContract)
+
+ nnsCs, err := c.nnsContractState()
+ if err != nil {
+ return err
+ }
+ nnsHash := nnsCs.Hash
+
+ w := io2.NewBufBinWriter()
+
+ // Update script size for a single-node committee is close to the maximum allowed size of 65535.
+ // Because of this we want to reuse alphabet contract NEF and manifest for different updates.
+	// The generated script is as follows.
+ // 1. Initialize static slot for alphabet NEF.
+ // 2. Store NEF into the static slot.
+ // 3. Push parameters for each alphabet contract on stack.
+ // 4. Add contract group to the manifest.
+ // 5. For each alphabet contract, invoke `update` using parameters on stack and
+ // NEF from step 2 and manifest from step 4.
+ emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
+ emit.Bytes(w.BinWriter, alphaCs.RawNEF)
+ emit.Opcodes(w.BinWriter, opcode.STSFLD0)
+
+ keysParam, err := c.deployAlphabetAccounts(nnsHash, w, alphaCs)
+ if err != nil {
+ return err
+ }
+
+ w.Reset()
+
+ if err = c.deployOrUpdateContracts(w, nnsHash, keysParam); err != nil {
+ return err
+ }
+
+ groupKey := c.ContractWallet.Accounts[0].PrivateKey().PublicKey()
+ _, _, err = c.emitUpdateNNSGroupScript(w, nnsHash, groupKey)
+ if err != nil {
+ return err
+ }
+ c.Command.Printf("NNS: Set %s -> %s\n", morphClient.NNSGroupKeyName, hex.EncodeToString(groupKey.Bytes()))
+
+ emit.Opcodes(w.BinWriter, opcode.LDSFLD0)
+ emit.Int(w.BinWriter, 1)
+ emit.Opcodes(w.BinWriter, opcode.PACK)
+ emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
+
+ if err := c.sendCommitteeTx(w.Bytes(), false); err != nil {
+ return err
+ }
+ return c.awaitTx()
+}
+
+func (c *initializeContext) deployOrUpdateContracts(w *io2.BufBinWriter, nnsHash util.Uint160, keysParam []any) error {
+ emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
+ emit.AppCall(w.BinWriter, nnsHash, "getPrice", callflag.All)
+ emit.Opcodes(w.BinWriter, opcode.STSFLD0)
+ emit.AppCall(w.BinWriter, nnsHash, "setPrice", callflag.All, 1)
+
+ for _, ctrName := range contractList {
+ cs := c.getContract(ctrName)
+
+ method := updateMethodName
+ ctrHash, err := nnsResolveHash(c.ReadOnlyInvoker, nnsHash, ctrName+".frostfs")
+ if err != nil {
+ if errors.Is(err, errMissingNNSRecord) {
+ // if contract not found we deploy it instead of update
+ method = deployMethodName
+ } else {
+ return fmt.Errorf("can't resolve hash for contract update: %w", err)
+ }
+ }
+
+ err = c.addManifestGroup(ctrHash, cs)
+ if err != nil {
+ return fmt.Errorf("can't sign manifest group: %v", err)
+ }
+
+ invokeHash := management.Hash
+ if method == updateMethodName {
+ invokeHash = ctrHash
+ }
+
+ params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam, updateMethodName))
+ res, err := c.CommitteeAct.MakeCall(invokeHash, method, params...)
+ if err != nil {
+ if method != updateMethodName || !strings.Contains(err.Error(), common.ErrAlreadyUpdated) {
+ return fmt.Errorf("deploy contract: %w", err)
+ }
+ c.Command.Printf("%s contract is already updated.\n", ctrName)
+ continue
+ }
+
+ w.WriteBytes(res.Script)
+
+ if method == deployMethodName {
+ // same actions are done in initializeContext.setNNS, can be unified
+ domain := ctrName + ".frostfs"
+ script, ok, err := c.nnsRegisterDomainScript(nnsHash, cs.Hash, domain)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ w.WriteBytes(script)
+ emit.AppCall(w.BinWriter, nnsHash, "deleteRecords", callflag.All, domain, int64(nns.TXT))
+ emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All,
+ domain, int64(nns.TXT), cs.Hash.StringLE())
+ emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All,
+ domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
+ }
+ c.Command.Printf("NNS: Set %s -> %s\n", domain, cs.Hash.StringLE())
+ }
+ }
+ return nil
+}
+
+func (c *initializeContext) deployAlphabetAccounts(nnsHash util.Uint160, w *io2.BufBinWriter, alphaCs *contractState) ([]any, error) {
+ var keysParam []any
+
+ baseGroups := alphaCs.Manifest.Groups
+
+ // alphabet contracts should be deployed by individual nodes to get different hashes.
+ for i, acc := range c.Accounts {
+ ctrHash, err := nnsResolveHash(c.ReadOnlyInvoker, nnsHash, getAlphabetNNSDomain(i))
+ if err != nil {
+ return nil, fmt.Errorf("can't resolve hash for contract update: %w", err)
+ }
+
+ keysParam = append(keysParam, acc.PrivateKey().PublicKey().Bytes())
+
+ params := c.getAlphabetDeployItems(i, len(c.Wallets))
+ emit.Array(w.BinWriter, params...)
+
+ alphaCs.Manifest.Groups = baseGroups
+ err = c.addManifestGroup(ctrHash, alphaCs)
+ if err != nil {
+ return nil, fmt.Errorf("can't sign manifest group: %v", err)
+ }
+
+ emit.Bytes(w.BinWriter, alphaCs.RawManifest)
+ emit.Opcodes(w.BinWriter, opcode.LDSFLD0)
+ emit.Int(w.BinWriter, 3)
+ emit.Opcodes(w.BinWriter, opcode.PACK)
+ emit.AppCallNoArgs(w.BinWriter, ctrHash, updateMethodName, callflag.All)
+ }
+ if err := c.sendCommitteeTx(w.Bytes(), false); err != nil {
+ if !strings.Contains(err.Error(), common.ErrAlreadyUpdated) {
+ return nil, err
+ }
+ c.Command.Println("Alphabet contracts are already updated.")
+ }
+
+ return keysParam, nil
+}
+
+func (c *initializeContext) deployContracts() error {
+ alphaCs := c.getContract(alphabetContract)
+
+ var keysParam []any
+
+ baseGroups := alphaCs.Manifest.Groups
+
+ // alphabet contracts should be deployed by individual nodes to get different hashes.
+ for i, acc := range c.Accounts {
+ ctrHash := state.CreateContractHash(acc.Contract.ScriptHash(), alphaCs.NEF.Checksum, alphaCs.Manifest.Name)
+ if c.isUpdated(ctrHash, alphaCs) {
+ c.Command.Printf("Alphabet contract #%d is already deployed.\n", i)
+ continue
+ }
+
+ alphaCs.Manifest.Groups = baseGroups
+ err := c.addManifestGroup(ctrHash, alphaCs)
+ if err != nil {
+ return fmt.Errorf("can't sign manifest group: %v", err)
+ }
+
+ keysParam = append(keysParam, acc.PrivateKey().PublicKey().Bytes())
+ params := getContractDeployParameters(alphaCs, c.getAlphabetDeployItems(i, len(c.Wallets)))
+
+ act, err := actor.NewSimple(c.Client, acc)
+ if err != nil {
+ return fmt.Errorf("could not create actor: %w", err)
+ }
+
+ txHash, vub, err := act.SendCall(management.Hash, deployMethodName, params...)
+ if err != nil {
+ return fmt.Errorf("can't deploy alphabet #%d contract: %w", i, err)
+ }
+
+ c.SentTxs = append(c.SentTxs, hashVUBPair{hash: txHash, vub: vub})
+ }
+
+ for _, ctrName := range contractList {
+ cs := c.getContract(ctrName)
+
+ ctrHash := cs.Hash
+ if c.isUpdated(ctrHash, cs) {
+ c.Command.Printf("%s contract is already deployed.\n", ctrName)
+ continue
+ }
+
+ err := c.addManifestGroup(ctrHash, cs)
+ if err != nil {
+ return fmt.Errorf("can't sign manifest group: %v", err)
+ }
+
+ params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam, deployMethodName))
+ res, err := c.CommitteeAct.MakeCall(management.Hash, deployMethodName, params...)
+ if err != nil {
+ return fmt.Errorf("can't deploy %s contract: %w", ctrName, err)
+ }
+
+ if err := c.sendCommitteeTx(res.Script, false); err != nil {
+ return err
+ }
+ }
+
+ return c.awaitTx()
+}
+
+func (c *initializeContext) isUpdated(ctrHash util.Uint160, cs *contractState) bool {
+ realCs, err := c.Client.GetContractStateByHash(ctrHash)
+ return err == nil && realCs.NEF.Checksum == cs.NEF.Checksum
+}
+
+func (c *initializeContext) getContract(ctrName string) *contractState {
+ return c.Contracts[ctrName]
+}
+
+func (c *initializeContext) readContracts(names []string) error {
+ var (
+ fi os.FileInfo
+ err error
+ )
+ if c.ContractPath != "" {
+ fi, err = os.Stat(c.ContractPath)
+ if err != nil {
+ return fmt.Errorf("invalid contracts path: %w", err)
+ }
+ }
+
+ if c.ContractPath != "" && fi.IsDir() {
+ for _, ctrName := range names {
+ cs, err := readContract(filepath.Join(c.ContractPath, ctrName), ctrName)
+ if err != nil {
+ return err
+ }
+ c.Contracts[ctrName] = cs
+ }
+ } else {
+ var r io.ReadCloser
+ if c.ContractPath == "" {
+ return errors.New("contracts flag is missing")
+ }
+ r, err = os.Open(c.ContractPath)
+ if err != nil {
+ return fmt.Errorf("can't open contracts archive: %w", err)
+ }
+ defer r.Close()
+
+ m, err := readContractsFromArchive(r, names)
+ if err != nil {
+ return err
+ }
+ for _, name := range names {
+ if err := m[name].parse(); err != nil {
+ return err
+ }
+ c.Contracts[name] = m[name]
+ }
+ }
+
+ for _, ctrName := range names {
+ if ctrName != alphabetContract {
+ cs := c.Contracts[ctrName]
+ cs.Hash = state.CreateContractHash(c.CommitteeAcc.Contract.ScriptHash(),
+ cs.NEF.Checksum, cs.Manifest.Name)
+ }
+ }
+ return nil
+}
+
+func readContract(ctrPath, ctrName string) (*contractState, error) {
+ rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef"))
+ if err != nil {
+ return nil, fmt.Errorf("can't read NEF file for %s contract: %w", ctrName, err)
+ }
+ rawManif, err := os.ReadFile(filepath.Join(ctrPath, "config.json"))
+ if err != nil {
+ return nil, fmt.Errorf("can't read manifest file for %s contract: %w", ctrName, err)
+ }
+
+ cs := &contractState{
+ RawNEF: rawNef,
+ RawManifest: rawManif,
+ }
+
+ return cs, cs.parse()
+}
+
+func (cs *contractState) parse() error {
+ nf, err := nef.FileFromBytes(cs.RawNEF)
+ if err != nil {
+ return fmt.Errorf("can't parse NEF file: %w", err)
+ }
+
+ m := new(manifest.Manifest)
+ if err := json.Unmarshal(cs.RawManifest, m); err != nil {
+ return fmt.Errorf("can't parse manifest file: %w", err)
+ }
+
+ cs.NEF = &nf
+ cs.Manifest = m
+ return nil
+}
+
+func readContractsFromArchive(file io.Reader, names []string) (map[string]*contractState, error) {
+ m := make(map[string]*contractState, len(names))
+ for i := range names {
+ m[names[i]] = new(contractState)
+ }
+
+ gr, err := gzip.NewReader(file)
+ if err != nil {
+ return nil, fmt.Errorf("contracts file must be tar.gz archive: %w", err)
+ }
+
+ r := tar.NewReader(gr)
+ for h, err := r.Next(); ; h, err = r.Next() {
+ if err != nil {
+ break
+ }
+
+ dir, _ := filepath.Split(h.Name)
+ ctrName := filepath.Base(dir)
+
+ cs, ok := m[ctrName]
+ if !ok {
+ continue
+ }
+
+ switch {
+ case strings.HasSuffix(h.Name, filepath.Join(ctrName, ctrName+"_contract.nef")):
+ cs.RawNEF, err = io.ReadAll(r)
+ if err != nil {
+ return nil, fmt.Errorf("can't read NEF file for %s contract: %w", ctrName, err)
+ }
+ case strings.HasSuffix(h.Name, "config.json"):
+ cs.RawManifest, err = io.ReadAll(r)
+ if err != nil {
+ return nil, fmt.Errorf("can't read manifest file for %s contract: %w", ctrName, err)
+ }
+ }
+ m[ctrName] = cs
+ }
+
+ for ctrName, cs := range m {
+ if cs.RawNEF == nil {
+ return nil, fmt.Errorf("NEF for %s contract wasn't found", ctrName)
+ }
+ if cs.RawManifest == nil {
+ return nil, fmt.Errorf("manifest for %s contract wasn't found", ctrName)
+ }
+ }
+ return m, nil
+}
+
+func getContractDeployParameters(cs *contractState, deployData []any) []any {
+ return []any{cs.RawNEF, cs.RawManifest, deployData}
+}
+
+func (c *initializeContext) getContractDeployData(ctrName string, keysParam []any, method string) []any {
+ items := make([]any, 1, 6)
+ items[0] = false // notaryDisabled is false
+
+ switch ctrName {
+ case frostfsContract:
+ items = append(items,
+ c.Contracts[processingContract].Hash,
+ keysParam,
+ smartcontract.Parameter{})
+ case processingContract:
+ items = append(items, c.Contracts[frostfsContract].Hash)
+ return items[1:] // no notary info
+ case balanceContract:
+ items = append(items,
+ c.Contracts[netmapContract].Hash,
+ c.Contracts[containerContract].Hash)
+ case containerContract:
+		// If NNS is updated multiple times, we can't calculate
+		// its actual hash based on local data, thus query the chain.
+ nnsCs, err := c.Client.GetContractStateByID(1)
+ if err != nil {
+ panic("NNS is not yet deployed")
+ }
+ items = append(items,
+ c.Contracts[netmapContract].Hash,
+ c.Contracts[balanceContract].Hash,
+ c.Contracts[frostfsIDContract].Hash,
+ nnsCs.Hash,
+ "container")
+ case frostfsIDContract:
+ items = append(items,
+ c.Contracts[netmapContract].Hash,
+ c.Contracts[containerContract].Hash)
+ case netmapContract:
+ md := getDefaultNetmapContractConfigMap()
+ if method == updateMethodName {
+ arr, err := c.getNetConfigFromNetmapContract()
+ if err != nil {
+ panic(err)
+ }
+ m, err := parseConfigFromNetmapContract(arr)
+ if err != nil {
+ panic(err)
+ }
+ for k, v := range m {
+ for _, key := range netmapConfigKeys {
+ if k == key {
+ md[k] = v
+ break
+ }
+ }
+ }
+ }
+
+ var configParam []any
+ for k, v := range md {
+ configParam = append(configParam, k, v)
+ }
+
+ items = append(items,
+ c.Contracts[balanceContract].Hash,
+ c.Contracts[containerContract].Hash,
+ keysParam,
+ configParam)
+ case proxyContract:
+ items = nil
+ default:
+ panic(fmt.Sprintf("invalid contract name: %s", ctrName))
+ }
+ return items
+}
+
+func (c *initializeContext) getNetConfigFromNetmapContract() ([]stackitem.Item, error) {
+ cs, err := c.Client.GetContractStateByID(1)
+ if err != nil {
+ return nil, fmt.Errorf("NNS is not yet deployed: %w", err)
+ }
+ nmHash, err := nnsResolveHash(c.ReadOnlyInvoker, cs.Hash, netmapContract+".frostfs")
+ if err != nil {
+ return nil, fmt.Errorf("can't get netmap contract hash: %w", err)
+ }
+ arr, err := unwrap.Array(c.ReadOnlyInvoker.Call(nmHash, "listConfig"))
+ if err != nil {
+ return nil, fmt.Errorf("can't fetch list of network config keys from the netmap contract")
+ }
+ return arr, err
+}
+
+func (c *initializeContext) getAlphabetDeployItems(i, n int) []any {
+ items := make([]any, 6)
+ items[0] = false
+ items[1] = c.Contracts[netmapContract].Hash
+ items[2] = c.Contracts[proxyContract].Hash
+ items[3] = innerring.GlagoliticLetter(i).String()
+ items[4] = int64(i)
+ items[5] = int64(n)
+ return items
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go
new file mode 100644
index 000000000..6758b4dd8
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go
@@ -0,0 +1,303 @@
+package morph
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
+ morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/io"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
+ nnsClient "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/emit"
+ "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
+)
+
+const defaultExpirationTime = 10 * 365 * 24 * time.Hour / time.Second
+
+const frostfsOpsEmail = "ops@frostfs.info"
+
+func (c *initializeContext) setNNS() error {
+ nnsCs, err := c.Client.GetContractStateByID(1)
+ if err != nil {
+ return err
+ }
+
+ ok, err := c.nnsRootRegistered(nnsCs.Hash, "frostfs")
+ if err != nil {
+ return err
+ } else if !ok {
+ bw := io.NewBufBinWriter()
+ emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
+ "frostfs", c.CommitteeAcc.Contract.ScriptHash(),
+ frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
+ emit.Opcodes(bw.BinWriter, opcode.ASSERT)
+ if err := c.sendCommitteeTx(bw.Bytes(), true); err != nil {
+ return fmt.Errorf("can't add domain root to NNS: %w", err)
+ }
+ if err := c.awaitTx(); err != nil {
+ return err
+ }
+ }
+
+ alphaCs := c.getContract(alphabetContract)
+ for i, acc := range c.Accounts {
+ alphaCs.Hash = state.CreateContractHash(acc.Contract.ScriptHash(), alphaCs.NEF.Checksum, alphaCs.Manifest.Name)
+
+ domain := getAlphabetNNSDomain(i)
+ if err := c.nnsRegisterDomain(nnsCs.Hash, alphaCs.Hash, domain); err != nil {
+ return err
+ }
+ c.Command.Printf("NNS: Set %s -> %s\n", domain, alphaCs.Hash.StringLE())
+ }
+
+ for _, ctrName := range contractList {
+ cs := c.getContract(ctrName)
+
+ domain := ctrName + ".frostfs"
+ if err := c.nnsRegisterDomain(nnsCs.Hash, cs.Hash, domain); err != nil {
+ return err
+ }
+ c.Command.Printf("NNS: Set %s -> %s\n", domain, cs.Hash.StringLE())
+ }
+
+ groupKey := c.ContractWallet.Accounts[0].PrivateKey().PublicKey()
+ err = c.updateNNSGroup(nnsCs.Hash, groupKey)
+ if err != nil {
+ return err
+ }
+ c.Command.Printf("NNS: Set %s -> %s\n", morphClient.NNSGroupKeyName, hex.EncodeToString(groupKey.Bytes()))
+
+ return c.awaitTx()
+}
+
+func (c *initializeContext) updateNNSGroup(nnsHash util.Uint160, pub *keys.PublicKey) error {
+ bw := io.NewBufBinWriter()
+ keyAlreadyAdded, domainRegCodeEmitted, err := c.emitUpdateNNSGroupScript(bw, nnsHash, pub)
+ if keyAlreadyAdded || err != nil {
+ return err
+ }
+
+ script := bw.Bytes()
+ if domainRegCodeEmitted {
+ w := io.NewBufBinWriter()
+ emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
+ wrapRegisterScriptWithPrice(w, nnsHash, script)
+ script = w.Bytes()
+ }
+
+ return c.sendCommitteeTx(script, true)
+}
+
+// emitUpdateNNSGroupScript emits script for updating group key stored in NNS.
+// First return value is true iff the key is already there and nothing should be done.
+// Second return value is true iff a domain registration code was emitted.
+func (c *initializeContext) emitUpdateNNSGroupScript(bw *io.BufBinWriter, nnsHash util.Uint160, pub *keys.PublicKey) (bool, bool, error) {
+ isAvail, err := nnsIsAvailable(c.Client, nnsHash, morphClient.NNSGroupKeyName)
+ if err != nil {
+ return false, false, err
+ }
+
+ if !isAvail {
+ currentPub, err := nnsResolveKey(c.ReadOnlyInvoker, nnsHash, morphClient.NNSGroupKeyName)
+ if err != nil {
+ return false, false, err
+ }
+
+ if pub.Equal(currentPub) {
+ return true, false, nil
+ }
+ }
+
+ if isAvail {
+ emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All,
+ morphClient.NNSGroupKeyName, c.CommitteeAcc.Contract.ScriptHash(),
+ frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
+ emit.Opcodes(bw.BinWriter, opcode.ASSERT)
+ }
+
+ emit.AppCall(bw.BinWriter, nnsHash, "deleteRecords", callflag.All, "group.frostfs", int64(nns.TXT))
+ emit.AppCall(bw.BinWriter, nnsHash, "addRecord", callflag.All,
+ "group.frostfs", int64(nns.TXT), hex.EncodeToString(pub.Bytes()))
+
+ return false, isAvail, nil
+}
+
+func getAlphabetNNSDomain(i int) string {
+ return alphabetContract + strconv.FormatUint(uint64(i), 10) + ".frostfs"
+}
+
+// wrapRegisterScriptWithPrice wraps a given script with `getPrice`/`setPrice` calls for NNS.
+// It is intended to be used for a single transaction, and not as a part of other scripts.
+// It is assumed that script already contains static slot initialization code, the first one
+// (with index 0) is used to store the price.
+func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []byte) {
+ if len(s) == 0 {
+ return
+ }
+
+ emit.AppCall(w.BinWriter, nnsHash, "getPrice", callflag.All)
+ emit.Opcodes(w.BinWriter, opcode.STSFLD0)
+ emit.AppCall(w.BinWriter, nnsHash, "setPrice", callflag.All, 1)
+
+ w.WriteBytes(s)
+
+ emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
+ emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
+
+ if w.Err != nil {
+ panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err))
+ }
+}
+
+func (c *initializeContext) nnsRegisterDomainScript(nnsHash, expectedHash util.Uint160, domain string) ([]byte, bool, error) {
+ ok, err := nnsIsAvailable(c.Client, nnsHash, domain)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if ok {
+ bw := io.NewBufBinWriter()
+ emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All,
+ domain, c.CommitteeAcc.Contract.ScriptHash(),
+ frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
+ emit.Opcodes(bw.BinWriter, opcode.ASSERT)
+
+ if bw.Err != nil {
+ panic(bw.Err)
+ }
+ return bw.Bytes(), false, nil
+ }
+
+ s, err := nnsResolveHash(c.ReadOnlyInvoker, nnsHash, domain)
+ if err != nil {
+ return nil, false, err
+ }
+ return nil, s == expectedHash, nil
+}
+
+func (c *initializeContext) nnsRegisterDomain(nnsHash, expectedHash util.Uint160, domain string) error {
+ script, ok, err := c.nnsRegisterDomainScript(nnsHash, expectedHash, domain)
+ if ok || err != nil {
+ return err
+ }
+
+ w := io.NewBufBinWriter()
+ emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
+ wrapRegisterScriptWithPrice(w, nnsHash, script)
+
+ emit.AppCall(w.BinWriter, nnsHash, "deleteRecords", callflag.All, domain, int64(nns.TXT))
+ emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All,
+ domain, int64(nns.TXT), expectedHash.StringLE())
+ emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All,
+ domain, int64(nns.TXT), address.Uint160ToString(expectedHash))
+ return c.sendCommitteeTx(w.Bytes(), true)
+}
+
+func (c *initializeContext) nnsRootRegistered(nnsHash util.Uint160, zone string) (bool, error) {
+ res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone)
+ if err != nil {
+ return false, err
+ }
+
+ return res.State == vmstate.Halt.String(), nil
+}
+
+var errMissingNNSRecord = errors.New("missing NNS record")
+
+// Returns errMissingNNSRecord if invocation fault exception contains "token not found".
+func nnsResolveHash(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (util.Uint160, error) {
+ item, err := nnsResolve(inv, nnsHash, domain)
+ if err != nil {
+ return util.Uint160{}, err
+ }
+ return parseNNSResolveResult(item)
+}
+
+func nnsResolve(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
+ return unwrap.Item(inv.Call(nnsHash, "resolve", domain, int64(nns.TXT)))
+}
+
+func nnsResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*keys.PublicKey, error) {
+ res, err := nnsResolve(inv, nnsHash, domain)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := res.Value().(stackitem.Null); ok {
+ return nil, errors.New("NNS record is missing")
+ }
+ arr, ok := res.Value().([]stackitem.Item)
+ if !ok {
+ return nil, errors.New("API of the NNS contract method `resolve` has changed")
+ }
+ for i := range arr {
+ var bs []byte
+ bs, err = arr[i].TryBytes()
+ if err != nil {
+ continue
+ }
+
+ return keys.NewPublicKeyFromString(string(bs))
+ }
+ return nil, errors.New("no valid keys are found")
+}
+
+// parseNNSResolveResult parses the result of resolving NNS record.
+// It works with multiple formats (corresponding to multiple NNS versions).
+// If array of hashes is provided, it returns only the first one.
+func parseNNSResolveResult(res stackitem.Item) (util.Uint160, error) {
+ arr, ok := res.Value().([]stackitem.Item)
+ if !ok {
+ arr = []stackitem.Item{res}
+ }
+ if _, ok := res.Value().(stackitem.Null); ok || len(arr) == 0 {
+ return util.Uint160{}, errors.New("NNS record is missing")
+ }
+ for i := range arr {
+ bs, err := arr[i].TryBytes()
+ if err != nil {
+ continue
+ }
+
+		// We support several formats for hash encoding; this logic should be maintained in sync
+		// with nnsResolve from pkg/morph/client/nns.go
+ h, err := util.Uint160DecodeStringLE(string(bs))
+ if err == nil {
+ return h, nil
+ }
+
+ h, err = address.StringToUint160(string(bs))
+ if err == nil {
+ return h, nil
+ }
+ }
+ return util.Uint160{}, errors.New("no valid hashes are found")
+}
+
+func nnsIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
+ switch c.(type) {
+ case *rpcclient.Client:
+ inv := invoker.New(c, nil)
+ reader := nnsClient.NewReader(inv, nnsHash)
+ return reader.IsAvailable(name)
+ default:
+ b, err := unwrap.Bool(invokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
+ if err != nil {
+ return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
+ }
+
+ return b, nil
+ }
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize_register.go
similarity index 60%
rename from cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go
rename to cmd/frostfs-adm/internal/modules/morph/initialize_register.go
index 7b7597d91..469b269de 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_register.go
@@ -1,17 +1,15 @@
-package initialize
+package morph
import (
+ "errors"
"fmt"
- "math/big"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/pkg/core/native"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
@@ -26,9 +24,8 @@ const (
registerBatchSize = transaction.MaxAttributes - 1
)
-func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
- reader := neo.NewReader(c.ReadOnlyInvoker)
- regPrice, err := reader.GetRegisterPrice()
+func (c *initializeContext) registerCandidateRange(start, end int) error {
+ regPrice, err := c.getCandidateRegisterPrice()
if err != nil {
return fmt.Errorf("can't fetch registration price: %w", err)
}
@@ -40,14 +37,16 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
emit.Opcodes(w.BinWriter, opcode.ASSERT)
}
emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice)
- assert.NoError(w.Err)
+ if w.Err != nil {
+ panic(fmt.Sprintf("BUG: %v", w.Err))
+ }
- signers := []actor.SignerAccount{{
- Signer: c.GetSigner(false, c.CommitteeAcc),
+ signers := []rpcclient.SignerAccount{{
+ Signer: c.getSigner(false, c.CommitteeAcc),
Account: c.CommitteeAcc,
}}
for _, acc := range c.Accounts[start:end] {
- signers = append(signers, actor.SignerAccount{
+ signers = append(signers, rpcclient.SignerAccount{
Signer: transaction.Signer{
Account: acc.Contract.ScriptHash(),
Scopes: transaction.CustomContracts,
@@ -57,15 +56,11 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
})
}
- act, err := actor.New(c.Client, signers)
- if err != nil {
- return fmt.Errorf("can't create actor: %w", err)
- }
- tx, err := act.MakeRun(w.Bytes())
+ tx, err := c.Client.CreateTxFromScript(w.Bytes(), c.CommitteeAcc, -1, 0, signers)
if err != nil {
return fmt.Errorf("can't create tx: %w", err)
}
- if err := c.MultiSign(tx, constants.CommitteeAccountName); err != nil {
+ if err := c.multiSign(tx, committeeAccountName); err != nil {
return fmt.Errorf("can't sign a transaction: %w", err)
}
@@ -76,10 +71,10 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
}
}
- return c.SendTx(tx, c.Command, true)
+ return c.sendTx(tx, c.Command, true)
}
-func registerCandidates(c *helper.InitializeContext) error {
+func (c *initializeContext) registerCandidates() error {
cc, err := unwrap.Array(c.ReadOnlyInvoker.Call(neo.Hash, "getCandidates"))
if err != nil {
return fmt.Errorf("`getCandidates`: %w", err)
@@ -96,12 +91,15 @@ func registerCandidates(c *helper.InitializeContext) error {
// Register candidates in batches in order to overcome the signers amount limit.
// See: https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/transaction/transaction.go#L27
for i := 0; i < need; i += registerBatchSize {
- start, end := i, min(i+registerBatchSize, need)
+ start, end := i, i+registerBatchSize
+ if end > need {
+ end = need
+ }
// This check is sound because transactions are accepted/rejected atomically.
if have >= end {
continue
}
- if err := registerCandidateRange(c, start, end); err != nil {
+ if err := c.registerCandidateRange(start, end); err != nil {
return fmt.Errorf("registering candidates %d..%d: %q", start, end-1, err)
}
}
@@ -109,15 +107,15 @@ func registerCandidates(c *helper.InitializeContext) error {
return nil
}
-func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
+func (c *initializeContext) transferNEOToAlphabetContracts() error {
neoHash := neo.Hash
- ok, err := transferNEOFinished(c)
+ ok, err := c.transferNEOFinished(neoHash)
if ok || err != nil {
return err
}
- cs := c.GetContract(constants.AlphabetContract)
+ cs := c.getContract(alphabetContract)
amount := initialAlphabetNEOAmount / len(c.Wallets)
bw := io.NewBufBinWriter()
@@ -128,15 +126,39 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
}
- if err := c.SendCommitteeTx(bw.Bytes(), false); err != nil {
+ if err := c.sendCommitteeTx(bw.Bytes(), false); err != nil {
return err
}
- return c.AwaitTx()
+ return c.awaitTx()
}
-func transferNEOFinished(c *helper.InitializeContext) (bool, error) {
- r := neo.NewReader(c.ReadOnlyInvoker)
- bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
- return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
+func (c *initializeContext) transferNEOFinished(neoHash util.Uint160) (bool, error) {
+ bal, err := c.Client.NEP17BalanceOf(neoHash, c.CommitteeAcc.Contract.ScriptHash())
+ return bal < native.NEOTotalSupply, err
+}
+
+var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
+
+func (c *initializeContext) getCandidateRegisterPrice() (int64, error) {
+ switch c.Client.(type) {
+ case *rpcclient.Client:
+ inv := invoker.New(c.Client, nil)
+ reader := neo.NewReader(inv)
+ return reader.GetRegisterPrice()
+ default:
+ neoHash := neo.Hash
+ res, err := invokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
+ if err != nil {
+ return 0, err
+ }
+ if len(res.Stack) == 0 {
+ return 0, errGetPriceInvalid
+ }
+ bi, err := res.Stack[0].TryInteger()
+ if err != nil || !bi.IsInt64() {
+ return 0, errGetPriceInvalid
+ }
+ return bi.Int64(), nil
+ }
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go b/cmd/frostfs-adm/internal/modules/morph/initialize_roles.go
similarity index 58%
rename from cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
rename to cmd/frostfs-adm/internal/modules/morph/initialize_roles.go
index 05bc83a8b..46466602e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_roles.go
@@ -1,9 +1,6 @@
-package initialize
+package morph
import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/rolemgmt"
@@ -11,8 +8,8 @@ import (
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
)
-func setNotaryAndAlphabetNodes(c *helper.InitializeContext) error {
- if ok, err := setRolesFinished(c); ok || err != nil {
+func (c *initializeContext) setNotaryAndAlphabetNodes() error {
+ if ok, err := c.setRolesFinished(); ok || err != nil {
if err == nil {
c.Command.Println("Stage 2: already performed.")
}
@@ -30,23 +27,19 @@ func setNotaryAndAlphabetNodes(c *helper.InitializeContext) error {
emit.AppCall(w.BinWriter, rolemgmt.Hash, "designateAsRole",
callflag.States|callflag.AllowNotify, int64(noderoles.NeoFSAlphabet), pubs)
- if err := c.SendCommitteeTx(w.Bytes(), false); err != nil {
- return fmt.Errorf("send committee transaction: %w", err)
+ if err := c.sendCommitteeTx(w.Bytes(), false); err != nil {
+ return err
}
- err := c.AwaitTx()
- if err != nil {
- err = fmt.Errorf("await committee transaction: %w", err)
- }
- return err
+ return c.awaitTx()
}
-func setRolesFinished(c *helper.InitializeContext) (bool, error) {
+func (c *initializeContext) setRolesFinished() (bool, error) {
height, err := c.Client.GetBlockCount()
if err != nil {
return false, err
}
- pubs, err := helper.GetDesignatedByRole(c.ReadOnlyInvoker, rolemgmt.Hash, noderoles.NeoFSAlphabet, height)
+ pubs, err := getDesignatedByRole(c.ReadOnlyInvoker, rolemgmt.Hash, noderoles.NeoFSAlphabet, height)
return len(pubs) == len(c.Wallets), err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize_test.go
similarity index 51%
rename from cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
rename to cmd/frostfs-adm/internal/modules/morph/initialize_test.go
index 9bc51c055..30a7168dd 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_test.go
@@ -1,4 +1,4 @@
-package initialize
+package morph
import (
"encoding/hex"
@@ -9,14 +9,6 @@ import (
"testing"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- cmdConfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/generate"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/node"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/config"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -28,7 +20,7 @@ import (
)
const (
- contractsPath = "../../../../../../contract/frostfs-contract-v0.18.0.tar.gz"
+ contractsPath = "../../../../../../frostfs-contract/frostfs-contract-v0.16.0.tar.gz"
protoFileName = "proto.yml"
)
@@ -50,10 +42,10 @@ func TestInitialize(t *testing.T) {
testInitialize(t, 16)
})
t.Run("max nodes", func(t *testing.T) {
- testInitialize(t, constants.MaxAlphabetNodes)
+ testInitialize(t, maxAlphabetNodes)
})
t.Run("too many nodes", func(t *testing.T) {
- require.ErrorIs(t, generateTestData(t.TempDir(), constants.MaxAlphabetNodes+1), helper.ErrTooManyAlphabetNodes)
+ require.ErrorIs(t, generateTestData(t, t.TempDir(), maxAlphabetNodes+1), ErrTooManyAlphabetNodes)
})
}
@@ -61,66 +53,60 @@ func testInitialize(t *testing.T, committeeSize int) {
testdataDir := t.TempDir()
v := viper.GetViper()
- require.NoError(t, generateTestData(testdataDir, committeeSize))
- v.Set(commonflags.ProtoConfigPath, filepath.Join(testdataDir, protoFileName))
+ require.NoError(t, generateTestData(t, testdataDir, committeeSize))
+ v.Set(protoConfigPath, filepath.Join(testdataDir, protoFileName))
// Set to the path or remove the next statement to download from the network.
- require.NoError(t, Cmd.Flags().Set(commonflags.ContractsInitFlag, contractsPath))
-
- dumpPath := filepath.Join(testdataDir, "out")
- require.NoError(t, Cmd.Flags().Set(commonflags.LocalDumpFlag, dumpPath))
- v.Set(commonflags.AlphabetWalletsFlag, testdataDir)
- v.Set(commonflags.EpochDurationInitFlag, 1)
- v.Set(commonflags.MaxObjectSizeInitFlag, 1024)
+ require.NoError(t, initCmd.Flags().Set(contractsInitFlag, contractsPath))
+ v.Set(localDumpFlag, filepath.Join(testdataDir, "out"))
+ v.Set(alphabetWalletsFlag, testdataDir)
+ v.Set(epochDurationInitFlag, 1)
+ v.Set(maxObjectSizeInitFlag, 1024)
setTestCredentials(v, committeeSize)
- require.NoError(t, initializeSideChainCmd(Cmd, nil))
+ require.NoError(t, initializeSideChainCmd(initCmd, nil))
t.Run("force-new-epoch", func(t *testing.T) {
- require.NoError(t, netmap.ForceNewEpoch.Flags().Set(commonflags.LocalDumpFlag, dumpPath))
- require.NoError(t, netmap.ForceNewEpochCmd(netmap.ForceNewEpoch, nil))
+ require.NoError(t, forceNewEpochCmd(forceNewEpoch, nil))
})
t.Run("set-config", func(t *testing.T) {
- require.NoError(t, cmdConfig.SetCmd.Flags().Set(commonflags.LocalDumpFlag, dumpPath))
- require.NoError(t, cmdConfig.SetConfigCmd(cmdConfig.SetCmd, []string{"MaintenanceModeAllowed=true"}))
+ require.NoError(t, setConfigCmd(setConfig, []string{"MaintenanceModeAllowed=true"}))
})
t.Run("set-policy", func(t *testing.T) {
- require.NoError(t, policy.Set.Flags().Set(commonflags.LocalDumpFlag, dumpPath))
- require.NoError(t, policy.SetPolicyCmd(policy.Set, []string{"ExecFeeFactor=1"}))
+ require.NoError(t, setPolicyCmd(setPolicy, []string{"ExecFeeFactor=1"}))
})
t.Run("remove-node", func(t *testing.T) {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
pub := hex.EncodeToString(pk.PublicKey().Bytes())
- require.NoError(t, node.RemoveCmd.Flags().Set(commonflags.LocalDumpFlag, dumpPath))
- require.NoError(t, node.RemoveNodesCmd(node.RemoveCmd, []string{pub}))
+ require.NoError(t, removeNodesCmd(removeNodes, []string{pub}))
})
}
-func generateTestData(dir string, size int) error {
+func generateTestData(t *testing.T, dir string, size int) error {
v := viper.GetViper()
- v.Set(commonflags.AlphabetWalletsFlag, dir)
+ v.Set(alphabetWalletsFlag, dir)
sizeStr := strconv.FormatUint(uint64(size), 10)
- if err := generate.GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, sizeStr); err != nil {
+ if err := generateAlphabetCmd.Flags().Set(alphabetSizeFlag, sizeStr); err != nil {
return err
}
setTestCredentials(v, size)
- if err := generate.AlphabetCreds(generate.GenerateAlphabetCmd, nil); err != nil {
+ if err := generateAlphabetCreds(generateAlphabetCmd, nil); err != nil {
return err
}
var pubs []string
- for i := range size {
+ for i := 0; i < size; i++ {
p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json")
w, err := wallet.NewWalletFromFile(p)
if err != nil {
return fmt.Errorf("wallet doesn't exist: %w", err)
}
for _, acc := range w.Accounts {
- if acc.Label == constants.SingleAccountName {
+ if acc.Label == singleAccountName {
pub, ok := vm.ParseSignatureContract(acc.Contract.Script)
if !ok {
return fmt.Errorf("could not parse signature script for %s", acc.Address)
@@ -148,8 +134,8 @@ func generateTestData(dir string, size int) error {
}
func setTestCredentials(v *viper.Viper, size int) {
- for i := range size {
+ for i := 0; i < size; i++ {
v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10))
}
- v.Set("credentials.contract", constants.TestContractPassword)
+ v.Set("credentials.contract", testContractPassword)
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize_transfer.go
new file mode 100644
index 000000000..1f8e53416
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_transfer.go
@@ -0,0 +1,190 @@
+package morph
+
+import (
+ "fmt"
+
+ "github.com/nspcc-dev/neo-go/pkg/core/native"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/io"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
+ scContext "github.com/nspcc-dev/neo-go/pkg/smartcontract/context"
+ "github.com/nspcc-dev/neo-go/pkg/vm/emit"
+ "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+)
+
+const (
+ gasInitialTotalSupply = 30000000 * native.GASFactor
+ // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
+ initialAlphabetGASAmount = 10_000 * native.GASFactor
+ // initialProxyGASAmount represents the amount of GAS given to a proxy contract.
+ initialProxyGASAmount = 50_000 * native.GASFactor
+)
+
+func (c *initializeContext) transferFunds() error {
+ ok, err := c.transferFundsFinished()
+ if ok || err != nil {
+ if err == nil {
+ c.Command.Println("Stage 1: already performed.")
+ }
+ return err
+ }
+
+ var transfers []rpcclient.TransferTarget
+ for _, acc := range c.Accounts {
+ to := acc.Contract.ScriptHash()
+ transfers = append(transfers,
+ rpcclient.TransferTarget{
+ Token: gas.Hash,
+ Address: to,
+ Amount: initialAlphabetGASAmount,
+ },
+ )
+ }
+
+ // It is convenient to have all funds at the committee account.
+ transfers = append(transfers,
+ rpcclient.TransferTarget{
+ Token: gas.Hash,
+ Address: c.CommitteeAcc.Contract.ScriptHash(),
+ Amount: (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2,
+ },
+ rpcclient.TransferTarget{
+ Token: neo.Hash,
+ Address: c.CommitteeAcc.Contract.ScriptHash(),
+ Amount: native.NEOTotalSupply,
+ },
+ )
+
+ tx, err := createNEP17MultiTransferTx(c.Client, c.ConsensusAcc, 0, transfers, []rpcclient.SignerAccount{{
+ Signer: transaction.Signer{
+ Account: c.ConsensusAcc.Contract.ScriptHash(),
+ Scopes: transaction.CalledByEntry,
+ },
+ Account: c.ConsensusAcc,
+ }})
+ if err != nil {
+ return fmt.Errorf("can't create transfer transaction: %w", err)
+ }
+
+ if err := c.multiSignAndSend(tx, consensusAccountName); err != nil {
+ return fmt.Errorf("can't send transfer transaction: %w", err)
+ }
+
+ return c.awaitTx()
+}
+
+func (c *initializeContext) transferFundsFinished() (bool, error) {
+ acc := c.Accounts[0]
+
+ res, err := c.Client.NEP17BalanceOf(gas.Hash, acc.Contract.ScriptHash())
+ return res > initialAlphabetGASAmount/2, err
+}
+
+func (c *initializeContext) multiSignAndSend(tx *transaction.Transaction, accType string) error {
+ if err := c.multiSign(tx, accType); err != nil {
+ return err
+ }
+
+ return c.sendTx(tx, c.Command, false)
+}
+
+func (c *initializeContext) multiSign(tx *transaction.Transaction, accType string) error {
+ network, err := c.Client.GetNetwork()
+ if err != nil {
+ // error appears only if client
+ // has not been initialized
+ panic(err)
+ }
+
+ // Use parameter context to avoid dealing with signature order.
+ pc := scContext.NewParameterContext("", network, tx)
+ h := c.CommitteeAcc.Contract.ScriptHash()
+ if accType == consensusAccountName {
+ h = c.ConsensusAcc.Contract.ScriptHash()
+ }
+ for _, w := range c.Wallets {
+ acc, err := getWalletAccount(w, accType)
+ if err != nil {
+ return fmt.Errorf("can't find %s wallet account: %w", accType, err)
+ }
+
+ priv := acc.PrivateKey()
+ sign := priv.SignHashable(uint32(network), tx)
+ if err := pc.AddSignature(h, acc.Contract, priv.PublicKey(), sign); err != nil {
+ return fmt.Errorf("can't add signature: %w", err)
+ }
+ if len(pc.Items[h].Signatures) == len(acc.Contract.Parameters) {
+ break
+ }
+ }
+
+ w, err := pc.GetWitness(h)
+ if err != nil {
+ return fmt.Errorf("incomplete signature: %w", err)
+ }
+
+ for i := range tx.Signers {
+ if tx.Signers[i].Account == h {
+ if i < len(tx.Scripts) {
+ tx.Scripts[i] = *w
+ } else if i == len(tx.Scripts) {
+ tx.Scripts = append(tx.Scripts, *w)
+ } else {
+ panic("BUG: invalid signing order")
+ }
+ return nil
+ }
+ }
+
+ return fmt.Errorf("%s account was not found among transaction signers", accType)
+}
+
+func (c *initializeContext) transferGASToProxy() error {
+ proxyCs := c.getContract(proxyContract)
+
+ bal, err := c.Client.NEP17BalanceOf(gas.Hash, proxyCs.Hash)
+ if err != nil || bal > 0 {
+ return err
+ }
+
+ tx, err := createNEP17MultiTransferTx(c.Client, c.CommitteeAcc, 0, []rpcclient.TransferTarget{{
+ Token: gas.Hash,
+ Address: proxyCs.Hash,
+ Amount: initialProxyGASAmount,
+ }}, nil)
+ if err != nil {
+ return err
+ }
+
+ if err := c.multiSignAndSend(tx, committeeAccountName); err != nil {
+ return err
+ }
+
+ return c.awaitTx()
+}
+
+func createNEP17MultiTransferTx(c Client, acc *wallet.Account, netFee int64,
+ recipients []rpcclient.TransferTarget, cosigners []rpcclient.SignerAccount) (*transaction.Transaction, error) {
+ from := acc.Contract.ScriptHash()
+
+ w := io.NewBufBinWriter()
+ for i := range recipients {
+ emit.AppCall(w.BinWriter, recipients[i].Token, "transfer", callflag.All,
+ from, recipients[i].Address, recipients[i].Amount, recipients[i].Data)
+ emit.Opcodes(w.BinWriter, opcode.ASSERT)
+ }
+ if w.Err != nil {
+ return nil, fmt.Errorf("failed to create transfer script: %w", w.Err)
+ }
+ return c.CreateTxFromScript(w.Bytes(), acc, -1, netFee, append([]rpcclient.SignerAccount{{
+ Signer: transaction.Signer{
+ Account: from,
+ Scopes: transaction.CalledByEntry,
+ },
+ Account: acc,
+ }}, cosigners...))
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/local_client.go b/cmd/frostfs-adm/internal/modules/morph/local_client.go
new file mode 100644
index 000000000..0367f7479
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/local_client.go
@@ -0,0 +1,504 @@
+package morph
+
+import (
+ "crypto/elliptic"
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/nspcc-dev/neo-go/pkg/config"
+ "github.com/nspcc-dev/neo-go/pkg/config/netmode"
+ "github.com/nspcc-dev/neo-go/pkg/core"
+ "github.com/nspcc-dev/neo-go/pkg/core/block"
+ "github.com/nspcc-dev/neo-go/pkg/core/chaindump"
+ "github.com/nspcc-dev/neo-go/pkg/core/fee"
+ "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/core/storage"
+ "github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/hash"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/io"
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
+ "github.com/nspcc-dev/neo-go/pkg/network/payload"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/emit"
+ "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+ "go.uber.org/zap"
+)
+
+type localClient struct {
+ bc *core.Blockchain
+ transactions []*transaction.Transaction
+ dumpPath string
+ accounts []*wallet.Account
+ maxGasInvoke int64
+}
+
+func newLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*localClient, error) {
+ cfg, err := config.LoadFile(v.GetString(protoConfigPath))
+ if err != nil {
+ return nil, err
+ }
+
+ bc, err := core.NewBlockchain(storage.NewMemoryStore(), cfg.Blockchain(), zap.NewNop())
+ if err != nil {
+ return nil, err
+ }
+
+ m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount))
+ accounts := make([]*wallet.Account, len(wallets))
+ for i := range accounts {
+ accounts[i], err = getWalletAccount(wallets[i], consensusAccountName)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ indexMap := make(map[string]int)
+ for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee {
+ indexMap[pub] = i
+ }
+
+ sort.Slice(accounts, func(i, j int) bool {
+ pi := accounts[i].PrivateKey().PublicKey().Bytes()
+ pj := accounts[j].PrivateKey().PublicKey().Bytes()
+ return indexMap[string(pi)] < indexMap[string(pj)]
+ })
+ sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool {
+ return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
+ })
+
+ go bc.Run()
+
+ if cmd.Name() != "init" {
+ f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0600)
+ if err != nil {
+ return nil, fmt.Errorf("can't open local dump: %w", err)
+ }
+ defer f.Close()
+
+ r := io.NewBinReaderFromIO(f)
+
+ var skip uint32
+ if bc.BlockHeight() != 0 {
+ skip = bc.BlockHeight() + 1
+ }
+
+ count := r.ReadU32LE() - skip
+ if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
+ return nil, fmt.Errorf("can't restore local dump: %w", err)
+ }
+ }
+
+ return &localClient{
+ bc: bc,
+ dumpPath: dumpPath,
+ accounts: accounts[:m],
+ maxGasInvoke: 15_0000_0000,
+ }, nil
+}
+
+func (l *localClient) GetBlockCount() (uint32, error) {
+ return l.bc.BlockHeight(), nil
+}
+
+func (l *localClient) GetContractStateByID(id int32) (*state.Contract, error) {
+ h, err := l.bc.GetContractScriptHash(id)
+ if err != nil {
+ return nil, err
+ }
+ return l.GetContractStateByHash(h)
+}
+
+func (l *localClient) GetContractStateByHash(h util.Uint160) (*state.Contract, error) {
+ if cs := l.bc.GetContractState(h); cs != nil {
+ return cs, nil
+ }
+ return nil, storage.ErrKeyNotFound
+}
+
+func (l *localClient) GetNativeContracts() ([]state.NativeContract, error) {
+ return l.bc.GetNatives(), nil
+}
+
+func (l *localClient) GetNetwork() (netmode.Magic, error) {
+ return l.bc.GetConfig().Magic, nil
+}
+
+func (l *localClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*result.ApplicationLog, error) {
+ aer, err := l.bc.GetAppExecResults(h, *t)
+ if err != nil {
+ return nil, err
+ }
+
+ a := result.NewApplicationLog(h, aer, *t)
+ return &a, nil
+}
+
+func (l *localClient) CreateTxFromScript(script []byte, acc *wallet.Account, sysFee int64, netFee int64, cosigners []rpcclient.SignerAccount) (*transaction.Transaction, error) {
+ signers, accounts, err := getSigners(acc, cosigners)
+ if err != nil {
+ return nil, fmt.Errorf("failed to construct tx signers: %w", err)
+ }
+ if sysFee < 0 {
+ res, err := l.InvokeScript(script, signers)
+ if err != nil {
+ return nil, fmt.Errorf("can't add system fee to transaction: %w", err)
+ }
+ if res.State != "HALT" {
+ return nil, fmt.Errorf("can't add system fee to transaction: bad vm state: %s due to an error: %s", res.State, res.FaultException)
+ }
+ sysFee = res.GasConsumed
+ }
+
+ tx := transaction.New(script, sysFee)
+ tx.Signers = signers
+ tx.ValidUntilBlock = l.bc.BlockHeight() + 2
+
+ err = l.AddNetworkFee(tx, netFee, accounts...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to add network fee: %w", err)
+ }
+
+ return tx, nil
+}
+
+func (l *localClient) GetCommittee() (keys.PublicKeys, error) {
+ // not used by `morph init` command
+ panic("unexpected call")
+}
+
+// InvokeFunction is implemented via `InvokeScript`.
+func (l *localClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) {
+ var err error
+
+ pp := make([]any, len(sPrm))
+ for i, p := range sPrm {
+ pp[i], err = smartcontract.ExpandParameterToEmitable(p)
+ if err != nil {
+ return nil, fmt.Errorf("incorrect parameter type %s: %w", p.Type, err)
+ }
+ }
+
+ return invokeFunction(l, h, method, pp, ss)
+}
+
+func (l *localClient) CalculateNotaryFee(_ uint8) (int64, error) {
+ // not used by `morph init` command
+ panic("unexpected call")
+}
+
+func (l *localClient) SignAndPushP2PNotaryRequest(_ *transaction.Transaction, _ []byte, _ int64, _ int64, _ uint32, _ *wallet.Account) (*payload.P2PNotaryRequest, error) {
+ // not used by `morph init` command
+ panic("unexpected call")
+}
+
+func (l *localClient) SignAndPushInvocationTx(_ []byte, _ *wallet.Account, _ int64, _ fixedn.Fixed8, _ []rpcclient.SignerAccount) (util.Uint256, error) {
+ // not used by `morph init` command
+ panic("unexpected call")
+}
+
+func (l *localClient) TerminateSession(_ uuid.UUID) (bool, error) {
+ // not used by `morph init` command
+ panic("unexpected call")
+}
+
+func (l *localClient) TraverseIterator(_, _ uuid.UUID, _ int) ([]stackitem.Item, error) {
+ // not used by `morph init` command
+ panic("unexpected call")
+}
+
+// GetVersion return default version.
+func (l *localClient) GetVersion() (*result.Version, error) {
+ c := l.bc.GetConfig()
+ return &result.Version{
+ Protocol: result.Protocol{
+ AddressVersion: address.NEO3Prefix,
+ Network: c.Magic,
+ MillisecondsPerBlock: int(c.TimePerBlock / time.Millisecond),
+ MaxTraceableBlocks: c.MaxTraceableBlocks,
+ MaxValidUntilBlockIncrement: c.MaxValidUntilBlockIncrement,
+ MaxTransactionsPerBlock: c.MaxTransactionsPerBlock,
+ MemoryPoolMaxTransactions: c.MemPoolSize,
+ ValidatorsCount: byte(c.ValidatorsCount),
+ InitialGasDistribution: c.InitialGASSupply,
+ CommitteeHistory: c.CommitteeHistory,
+ P2PSigExtensions: c.P2PSigExtensions,
+ StateRootInHeader: c.StateRootInHeader,
+ ValidatorsHistory: c.ValidatorsHistory,
+ },
+ }, nil
+}
+
+func (l *localClient) InvokeContractVerify(util.Uint160, []smartcontract.Parameter, []transaction.Signer, ...transaction.Witness) (*result.Invoke, error) {
+ // not used by `morph init` command
+ panic("unexpected call")
+}
+
+// CalculateNetworkFee calculates network fee for the given transaction.
+// Copied from neo-go with minor corrections (no need to support non-notary mode):
+// https://github.com/nspcc-dev/neo-go/blob/v0.99.2/pkg/services/rpcsrv/server.go#L744
+func (l *localClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, error) {
+ hashablePart, err := tx.EncodeHashableFields()
+ if err != nil {
+ return 0, fmt.Errorf("failed to compute tx size: %w", err)
+ }
+
+ size := len(hashablePart) + io.GetVarSize(len(tx.Signers))
+ ef := l.bc.GetBaseExecFee()
+
+ var netFee int64
+ for i, signer := range tx.Signers {
+ var verificationScript []byte
+ for _, w := range tx.Scripts {
+ if w.VerificationScript != nil && hash.Hash160(w.VerificationScript).Equals(signer.Account) {
+ verificationScript = w.VerificationScript
+ break
+ }
+ }
+ if verificationScript == nil {
+ gasConsumed, err := l.bc.VerifyWitness(signer.Account, tx, &tx.Scripts[i], l.maxGasInvoke)
+ if err != nil {
+ return 0, fmt.Errorf("invalid signature: %w", err)
+ }
+ netFee += gasConsumed
+ size += io.GetVarSize([]byte{}) + io.GetVarSize(tx.Scripts[i].InvocationScript)
+ continue
+ }
+
+ fee, sizeDelta := fee.Calculate(ef, verificationScript)
+ netFee += fee
+ size += sizeDelta
+ }
+
+ fee := l.bc.FeePerByte()
+ netFee += int64(size) * fee
+
+ return netFee, nil
+}
+
+// AddNetworkFee adds network fee for each witness script and optional extra
+// network fee to transaction. `accs` is an array signer's accounts.
+// Copied from neo-go with minor corrections (no need to support contract signers):
+// https://github.com/nspcc-dev/neo-go/blob/6ff11baa1b9e4c71ef0d1de43b92a8c541ca732c/pkg/rpc/client/rpc.go#L960
+func (l *localClient) AddNetworkFee(tx *transaction.Transaction, extraFee int64, accs ...*wallet.Account) error {
+ if len(tx.Signers) != len(accs) {
+ return errors.New("number of signers must match number of scripts")
+ }
+
+ size := io.GetVarSize(tx)
+ ef := l.bc.GetBaseExecFee()
+ for i := range tx.Signers {
+ netFee, sizeDelta := fee.Calculate(ef, accs[i].Contract.Script)
+ tx.NetworkFee += netFee
+ size += sizeDelta
+ }
+
+ tx.NetworkFee += int64(size)*l.bc.FeePerByte() + extraFee
+ return nil
+}
+
+// getSigners returns an array of transaction signers and corresponding accounts from
+// given sender and cosigners. If cosigners list already contains sender, the sender
+// will be placed at the start of the list.
+// Copied from neo-go with minor corrections:
+// https://github.com/nspcc-dev/neo-go/blob/6ff11baa1b9e4c71ef0d1de43b92a8c541ca732c/pkg/rpc/client/rpc.go#L735
+func getSigners(sender *wallet.Account, cosigners []rpcclient.SignerAccount) ([]transaction.Signer, []*wallet.Account, error) {
+ var (
+ signers []transaction.Signer
+ accounts []*wallet.Account
+ )
+
+ from := sender.Contract.ScriptHash()
+ s := transaction.Signer{
+ Account: from,
+ Scopes: transaction.None,
+ }
+ for _, c := range cosigners {
+ if c.Signer.Account == from {
+ s = c.Signer
+ continue
+ }
+ signers = append(signers, c.Signer)
+ accounts = append(accounts, c.Account)
+ }
+ signers = append([]transaction.Signer{s}, signers...)
+ accounts = append([]*wallet.Account{sender}, accounts...)
+ return signers, accounts, nil
+}
+
+func (l *localClient) NEP17BalanceOf(h util.Uint160, acc util.Uint160) (int64, error) {
+ res, err := invokeFunction(l, h, "balanceOf", []any{acc}, nil)
+ if err != nil {
+ return 0, err
+ }
+ if res.State != vmstate.Halt.String() || len(res.Stack) == 0 {
+ return 0, fmt.Errorf("`balance`: invalid response (empty: %t): %s",
+ len(res.Stack) == 0, res.FaultException)
+ }
+ bi, err := res.Stack[0].TryInteger()
+ if err != nil || !bi.IsInt64() {
+ return 0, fmt.Errorf("`balance`: invalid response")
+ }
+ return bi.Int64(), nil
+}
+
+func (l *localClient) InvokeScript(script []byte, signers []transaction.Signer) (*result.Invoke, error) {
+ lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
+ if err != nil {
+ return nil, err
+ }
+
+ tx := transaction.New(script, 0)
+ tx.Signers = signers
+ tx.ValidUntilBlock = l.bc.BlockHeight() + 2
+
+ ic, err := l.bc.GetTestVM(trigger.Application, tx, &block.Block{
+ Header: block.Header{
+ Index: lastBlock.Index + 1,
+ Timestamp: lastBlock.Timestamp + 1,
+ },
+ })
+ if err != nil {
+ return nil, fmt.Errorf("get test VM: %w", err)
+ }
+
+ ic.VM.GasLimit = 100_0000_0000
+ ic.VM.LoadScriptWithFlags(script, callflag.All)
+
+ var errStr string
+ if err := ic.VM.Run(); err != nil {
+ errStr = err.Error()
+ }
+ return &result.Invoke{
+ State: ic.VM.State().String(),
+ GasConsumed: ic.VM.GasConsumed(),
+ Script: script,
+ Stack: ic.VM.Estack().ToArray(),
+ FaultException: errStr,
+ }, nil
+}
+
+func (l *localClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) {
+ // We need to test that transaction was formed correctly to catch as many errors as we can.
+ bs := tx.Bytes()
+ _, err := transaction.NewTransactionFromBytes(bs)
+ if err != nil {
+ return tx.Hash(), fmt.Errorf("invalid transaction: %w", err)
+ }
+
+ l.transactions = append(l.transactions, tx)
+ return tx.Hash(), nil
+}
+
+func (l *localClient) putTransactions() error {
+ // 1. Prepare new block.
+ lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
+ if err != nil {
+ panic(err)
+ }
+ defer func() { l.transactions = l.transactions[:0] }()
+
+ b := &block.Block{
+ Header: block.Header{
+ NextConsensus: l.accounts[0].Contract.ScriptHash(),
+ Script: transaction.Witness{
+ VerificationScript: l.accounts[0].Contract.Script,
+ },
+ Timestamp: lastBlock.Timestamp + 1,
+ },
+ Transactions: l.transactions,
+ }
+
+ if l.bc.GetConfig().StateRootInHeader {
+ b.StateRootEnabled = true
+ b.PrevStateRoot = l.bc.GetStateModule().CurrentLocalStateRoot()
+ }
+ b.PrevHash = lastBlock.Hash()
+ b.Index = lastBlock.Index + 1
+ b.RebuildMerkleRoot()
+
+ // 2. Sign prepared block.
+ var invocationScript []byte
+
+ magic := l.bc.GetConfig().Magic
+ for _, acc := range l.accounts {
+ sign := acc.PrivateKey().SignHashable(uint32(magic), b)
+ invocationScript = append(invocationScript, byte(opcode.PUSHDATA1), 64)
+ invocationScript = append(invocationScript, sign...)
+ }
+ b.Script.InvocationScript = invocationScript
+
+ // 3. Persist block.
+ return l.bc.AddBlock(b)
+}
+
+func invokeFunction(c Client, h util.Uint160, method string, parameters []any, signers []transaction.Signer) (*result.Invoke, error) {
+ w := io.NewBufBinWriter()
+ emit.Array(w.BinWriter, parameters...)
+ emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All)
+ if w.Err != nil {
+ panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
+ }
+ return c.InvokeScript(w.Bytes(), signers)
+}
+
+var errGetDesignatedByRoleResponse = errors.New("`getDesignatedByRole`: invalid response")
+
+func getDesignatedByRole(inv *invoker.Invoker, h util.Uint160, role noderoles.Role, u uint32) (keys.PublicKeys, error) {
+ arr, err := unwrap.Array(inv.Call(h, "getDesignatedByRole", int64(role), int64(u)))
+ if err != nil {
+ return nil, errGetDesignatedByRoleResponse
+ }
+
+ pubs := make(keys.PublicKeys, len(arr))
+ for i := range arr {
+ bs, err := arr[i].TryBytes()
+ if err != nil {
+ return nil, errGetDesignatedByRoleResponse
+ }
+ pubs[i], err = keys.NewPublicKeyFromBytes(bs, elliptic.P256())
+ if err != nil {
+ return nil, errGetDesignatedByRoleResponse
+ }
+ }
+
+ return pubs, nil
+}
+
+func (l *localClient) dump() (err error) {
+ defer l.bc.Close()
+
+ f, err := os.Create(l.dumpPath)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ closeErr := f.Close()
+ if err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }()
+
+ w := io.NewBinWriterFromIO(f)
+ w.WriteU32LE(l.bc.BlockHeight() + 1)
+ err = chaindump.Dump(l.bc, w, 0, l.bc.BlockHeight()+1)
+ return
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/n3client.go
similarity index 57%
rename from cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
rename to cmd/frostfs-adm/internal/modules/morph/n3client.go
index 3f3a66cb6..138943b6e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
+++ b/cmd/frostfs-adm/internal/modules/morph/n3client.go
@@ -1,16 +1,18 @@
-package helper
+package morph
import (
"context"
- "crypto/tls"
"errors"
"fmt"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "github.com/nspcc-dev/neo-go/pkg/config/netmode"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
+ "github.com/nspcc-dev/neo-go/pkg/network/payload"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@@ -24,25 +26,39 @@ import (
// Client represents N3 client interface capable of test-invoking scripts
// and sending signed transactions to chain.
type Client interface {
- actor.RPCActor
+ invoker.RPCInvoke
- GetNativeContracts() ([]state.Contract, error)
+ GetBlockCount() (uint32, error)
+ GetContractStateByID(int32) (*state.Contract, error)
+ GetContractStateByHash(util.Uint160) (*state.Contract, error)
+ GetNativeContracts() ([]state.NativeContract, error)
+ GetNetwork() (netmode.Magic, error)
GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error)
+ GetVersion() (*result.Version, error)
+ CreateTxFromScript([]byte, *wallet.Account, int64, int64, []rpcclient.SignerAccount) (*transaction.Transaction, error)
+ NEP17BalanceOf(util.Uint160, util.Uint160) (int64, error)
+ SendRawTransaction(*transaction.Transaction) (util.Uint256, error)
+ GetCommittee() (keys.PublicKeys, error)
+ CalculateNotaryFee(uint8) (int64, error)
+ CalculateNetworkFee(tx *transaction.Transaction) (int64, error)
+ AddNetworkFee(*transaction.Transaction, int64, ...*wallet.Account) error
+ SignAndPushInvocationTx([]byte, *wallet.Account, int64, fixedn.Fixed8, []rpcclient.SignerAccount) (util.Uint256, error)
+ SignAndPushP2PNotaryRequest(*transaction.Transaction, []byte, int64, int64, uint32, *wallet.Account) (*payload.P2PNotaryRequest, error)
}
-type HashVUBPair struct {
- Hash util.Uint256
- Vub uint32
+type hashVUBPair struct {
+ hash util.Uint256
+ vub uint32
}
-type ClientContext struct {
+type clientContext struct {
Client Client // a raw neo-go client OR a local chain implementation
CommitteeAct *actor.Actor // committee actor with the Global witness scope
ReadOnlyInvoker *invoker.Invoker // R/O contract invoker, does not contain any signer
- SentTxs []HashVUBPair
+ SentTxs []hashVUBPair
}
-func NewRemoteClient(v *viper.Viper) (Client, error) {
+func getN3Client(v *viper.Viper) (Client, error) {
// number of opened connections
// by neo-go client per one host
const (
@@ -51,27 +67,13 @@ func NewRemoteClient(v *viper.Viper) (Client, error) {
)
ctx := context.Background()
- endpoint := v.GetString(commonflags.EndpointFlag)
+ endpoint := v.GetString(endpointFlag)
if endpoint == "" {
return nil, errors.New("missing endpoint")
}
-
- var cfg *tls.Config
- if rootCAs := v.GetStringSlice("tls.trusted_ca_list"); len(rootCAs) != 0 {
- certFile := v.GetString("tls.certificate")
- keyFile := v.GetString("tls.key")
-
- tlsConfig, err := rpcclient.TLSClientConfig(rootCAs, certFile, keyFile)
- if err != nil {
- return nil, err
- }
-
- cfg = tlsConfig
- }
c, err := rpcclient.New(ctx, endpoint, rpcclient.Options{
MaxConnsPerHost: maxConnsPerHost,
RequestTimeout: requestTimeout,
- TLSClientConfig: cfg,
})
if err != nil {
return nil, err
@@ -82,7 +84,7 @@ func NewRemoteClient(v *viper.Viper) (Client, error) {
return c, nil
}
-func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
+func defaultClientContext(c Client, committeeAcc *wallet.Account) (*clientContext, error) {
commAct, err := actor.New(c, []actor.SignerAccount{{
Signer: transaction.Signer{
Account: committeeAcc.Contract.ScriptHash(),
@@ -94,14 +96,14 @@ func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContex
return nil, err
}
- return &ClientContext{
+ return &clientContext{
Client: c,
CommitteeAct: commAct,
ReadOnlyInvoker: invoker.New(c, nil),
}, nil
}
-func (c *ClientContext) SendTx(tx *transaction.Transaction, cmd *cobra.Command, await bool) error {
+func (c *clientContext) sendTx(tx *transaction.Transaction, cmd *cobra.Command, await bool) error {
h, err := c.Client.SendRawTransaction(tx)
if err != nil {
return err
@@ -111,27 +113,10 @@ func (c *ClientContext) SendTx(tx *transaction.Transaction, cmd *cobra.Command,
return fmt.Errorf("sent and actual tx hashes mismatch:\n\tsent: %v\n\tactual: %v", tx.Hash().StringLE(), h.StringLE())
}
- c.SentTxs = append(c.SentTxs, HashVUBPair{Hash: h, Vub: tx.ValidUntilBlock})
+ c.SentTxs = append(c.SentTxs, hashVUBPair{hash: h, vub: tx.ValidUntilBlock})
if await {
- return c.AwaitTx(cmd)
+ return c.awaitTx(cmd)
}
return nil
}
-
-func (c *ClientContext) AwaitTx(cmd *cobra.Command) error {
- if len(c.SentTxs) == 0 {
- return nil
- }
-
- if local, ok := c.Client.(*LocalClient); ok {
- if err := local.putTransactions(); err != nil {
- return fmt.Errorf("can't persist transactions: %w", err)
- }
- }
-
- err := AwaitTx(cmd, c.Client, c.SentTxs)
- c.SentTxs = c.SentTxs[:0]
-
- return err
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
deleted file mode 100644
index 94223dbd0..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package netmap
-
-import (
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const deltaFlag = "delta"
-
-func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
- wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
- if err != nil {
- return fmt.Errorf("can't initialize context: %w", err)
- }
-
- r := management.NewReader(wCtx.ReadOnlyInvoker)
- cs, err := helper.GetContractByID(r, 1)
- if err != nil {
- return fmt.Errorf("can't get NNS contract info: %w", err)
- }
-
- nmHash, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, helper.DomainOf(constants.NetmapContract))
- if err != nil {
- return fmt.Errorf("can't get netmap contract hash: %w", err)
- }
-
- bw := io.NewBufBinWriter()
- delta, _ := cmd.Flags().GetInt64(deltaFlag)
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil {
- return err
- }
-
- if err = wCtx.SendConsensusTx(bw.Bytes()); err == nil {
- err = wCtx.AwaitTx()
- }
- if err != nil && strings.Contains(err.Error(), "invalid epoch") {
- cmd.Println("Epoch has already ticked.")
- return nil
- }
- return err
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
deleted file mode 100644
index 291482e0f..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package netmap
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var (
- CandidatesCmd = &cobra.Command{
- Use: "netmap-candidates",
- Short: "List netmap candidates nodes",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: listNetmapCandidatesNodes,
- }
- ForceNewEpoch = &cobra.Command{
- Use: "force-new-epoch",
- Short: "Create new FrostFS epoch event in the side chain",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: ForceNewEpochCmd,
- }
-)
-
-func initNetmapCandidatesCmd() {
- CandidatesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-}
-
-func initForceNewEpochCmd() {
- ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
- ForceNewEpoch.Flags().Int64(deltaFlag, 1, "Number of epochs to increase the current epoch")
-}
-
-func init() {
- initNetmapCandidatesCmd()
- initForceNewEpochCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go b/cmd/frostfs-adm/internal/modules/morph/netmap_candidates.go
similarity index 67%
rename from cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go
rename to cmd/frostfs-adm/internal/modules/morph/netmap_candidates.go
index a689e0ec1..222b9902a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap_candidates.go
@@ -1,28 +1,24 @@
-package netmap
+package morph
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := getN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil)
- r := management.NewReader(inv)
- cs, err := helper.GetContractByID(r, 1)
+ cs, err := c.GetContractStateByID(1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract info: %w", err)
- nmHash, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.NetmapContract))
+ nmHash, err := nnsResolveHash(inv, cs.Hash, netmapContract+".frostfs")
commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
res, err := inv.Call(nmHash, "netmapCandidates")
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap_util.go b/cmd/frostfs-adm/internal/modules/morph/netmap_util.go
new file mode 100644
index 000000000..fa7aa0af3
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap_util.go
@@ -0,0 +1,44 @@
+package morph
+
+import (
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/spf13/viper"
+)
+
+func getDefaultNetmapContractConfigMap() map[string]any {
+ m := make(map[string]any)
+ m[netmap.EpochDurationConfig] = viper.GetInt64(epochDurationInitFlag)
+ m[netmap.MaxObjectSizeConfig] = viper.GetInt64(maxObjectSizeInitFlag)
+ m[netmap.ContainerFeeConfig] = viper.GetInt64(containerFeeInitFlag)
+ m[netmap.ContainerAliasFeeConfig] = viper.GetInt64(containerAliasFeeInitFlag)
+ m[netmap.IrCandidateFeeConfig] = viper.GetInt64(candidateFeeInitFlag)
+ m[netmap.WithdrawFeeConfig] = viper.GetInt64(withdrawFeeInitFlag)
+ m[netmap.HomomorphicHashingDisabledKey] = viper.GetBool(homomorphicHashDisabledInitFlag)
+ m[netmap.MaintenanceModeAllowedConfig] = viper.GetBool(maintenanceModeAllowedInitFlag)
+ return m
+}
+
+func parseConfigFromNetmapContract(arr []stackitem.Item) (map[string][]byte, error) {
+ m := make(map[string][]byte, len(arr))
+ for _, param := range arr {
+ tuple, ok := param.Value().([]stackitem.Item)
+ if !ok || len(tuple) != 2 {
+ return nil, errors.New("invalid ListConfig response from netmap contract")
+ }
+
+ k, err := tuple[0].TryBytes()
+ if err != nil {
+ return nil, errors.New("invalid config key from netmap contract")
+ }
+
+ v, err := tuple[1].TryBytes()
+ if err != nil {
+ return nil, invalidConfigValueErr(string(k))
+ }
+ m[string(k)] = v
+ }
+ return m, nil
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go
deleted file mode 100644
index 14f6eb390..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package nns
-
-import (
- "math/big"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-func initRegisterCmd() {
- Cmd.AddCommand(registerCmd)
- registerCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- registerCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- registerCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- registerCmd.Flags().String(nnsEmailFlag, constants.FrostfsOpsEmail, "Domain owner email")
- registerCmd.Flags().Int64(nnsRefreshFlag, constants.NNSRefreshDefVal, "SOA record REFRESH parameter")
- registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter")
- registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter")
- registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter")
- registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
-
- _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag)
-}
-
-func registerDomain(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
-
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- email, _ := cmd.Flags().GetString(nnsEmailFlag)
- refresh, _ := cmd.Flags().GetInt64(nnsRefreshFlag)
- retry, _ := cmd.Flags().GetInt64(nnsRetryFlag)
- expire, _ := cmd.Flags().GetInt64(nnsExpireFlag)
- ttl, _ := cmd.Flags().GetInt64(nnsTTLFlag)
-
- h, vub, err := c.Register(name, actor.Sender(), email, big.NewInt(refresh),
- big.NewInt(retry), big.NewInt(expire), big.NewInt(ttl))
- commonCmd.ExitOnErr(cmd, "unable to register domain: %w", err)
-
- cmd.Println("Waiting for transaction to persist...")
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "register domain error: %w", err)
- cmd.Println("Domain registered successfully")
-}
-
-func initDeleteCmd() {
- Cmd.AddCommand(deleteCmd)
- deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
-
- _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag)
-}
-
-func deleteDomain(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
-
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- h, vub, err := c.DeleteDomain(name)
-
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "delete domain error: %w", err)
- cmd.Println("Domain deleted successfully")
-}
-
-func initSetAdminCmd() {
- Cmd.AddCommand(setAdminCmd)
- setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
- setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage)
- _ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath)
-
- _ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag)
-}
-
-func setAdmin(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
-
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath))
- commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err)
- h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash())
-
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "Set admin error: %w", err)
- cmd.Println("Set admin successfully")
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go
deleted file mode 100644
index e49f62256..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package nns
-
-import (
- "errors"
-
- client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
- v := viper.GetViper()
- c, err := helper.NewRemoteClient(v)
- commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
-
- alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag))
- walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath))
- adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath))
-
- var (
- alphabet *helper.AlphabetWallets
- regularWallets []*helper.RegularWallets
- )
-
- if alphabetWalletPath != "" {
- alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName}
- }
-
- if walletPath != "" {
- regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath})
- }
-
- if adminWalletPath != "" {
- regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath})
- }
-
- if alphabet == nil && regularWallets == nil {
- commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided"))
- }
-
- ac, err := helper.NewLocalActor(c, alphabet, regularWallets...)
- commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
-
- r := management.NewReader(ac.Invoker)
- nnsCs, err := helper.GetContractByID(r, 1)
- commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
- return client.New(ac, nnsCs.Hash), ac
-}
-
-func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) {
- c, err := helper.NewRemoteClient(viper.GetViper())
- commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
-
- inv := invoker.New(c, nil)
- r := management.NewReader(inv)
- nnsCs, err := helper.GetContractByID(r, 1)
- commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
-
- return client.NewReader(inv, nnsCs.Hash), inv
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
deleted file mode 100644
index 9cb47356f..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package nns
-
-import (
- "errors"
- "math/big"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/spf13/cobra"
-)
-
-func initAddRecordCmd() {
- Cmd.AddCommand(addRecordCmd)
- addRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- addRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
- addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
- addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
-
- _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag)
- _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag)
- _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordDataFlag)
-}
-
-func initGetRecordsCmd() {
- Cmd.AddCommand(getRecordsCmd)
- getRecordsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- getRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- getRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
-
- _ = cobra.MarkFlagRequired(getRecordsCmd.Flags(), nnsNameFlag)
-}
-
-func initDelRecordsCmd() {
- Cmd.AddCommand(delRecordsCmd)
- delRecordsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
- delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
-
- _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag)
- _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)
-}
-
-func initDelRecordCmd() {
- Cmd.AddCommand(delRecordCmd)
- delRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- delRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
- delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
- delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
-
- _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
- _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)
- _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordDataFlag)
-}
-
-func addRecord(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
- recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
- typ, err := getRecordType(recordType)
- commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err)
- h, vub, err := c.AddRecord(name, typ, data)
- commonCmd.ExitOnErr(cmd, "unable to add record: %w", err)
-
- cmd.Println("Waiting for transaction to persist...")
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "renew domain error: %w", err)
- cmd.Println("Record added successfully")
-}
-
-func getRecords(cmd *cobra.Command, _ []string) {
- c, inv := nnsReader(cmd)
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
- if recordType == "" {
- sid, r, err := c.GetAllRecords(name)
- commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
- defer func() {
- _ = inv.TerminateSession(sid)
- }()
- items, err := inv.TraverseIterator(sid, &r, 0)
- commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
- for len(items) != 0 {
- for j := range items {
- rs := items[j].Value().([]stackitem.Item)
- bs, err := rs[2].TryBytes()
- commonCmd.ExitOnErr(cmd, "unable to parse record state: %w", err)
- cmd.Printf("%s %s\n",
- recordTypeToString(nns.RecordType(rs[1].Value().(*big.Int).Int64())),
- string(bs))
- }
- items, err = inv.TraverseIterator(sid, &r, 0)
- commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
- }
- } else {
- typ, err := getRecordType(recordType)
- commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err)
- items, err := c.GetRecords(name, typ)
- commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
- for _, item := range items {
- record, err := item.TryBytes()
- commonCmd.ExitOnErr(cmd, "unable to parse response: %w", err)
- cmd.Println(string(record))
- }
- }
-}
-
-func delRecords(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
- typ, err := getRecordType(recordType)
- commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err)
- h, vub, err := c.DeleteRecords(name, typ)
- commonCmd.ExitOnErr(cmd, "unable to delete records: %w", err)
-
- cmd.Println("Waiting for transaction to persist...")
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "delete records error: %w", err)
- cmd.Println("Records removed successfully")
-}
-
-func delRecord(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
- recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
- typ, err := getRecordType(recordType)
- commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err)
- h, vub, err := c.DeleteRecord(name, typ, data)
- commonCmd.ExitOnErr(cmd, "unable to delete record: %w", err)
-
- cmd.Println("Waiting for transaction to persist...")
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "delete records error: %w", err)
- cmd.Println("Record removed successfully")
-}
-
-func getRecordType(recordType string) (*big.Int, error) {
- switch strings.ToUpper(recordType) {
- case "A":
- return big.NewInt(int64(nns.A)), nil
- case "CNAME":
- return big.NewInt(int64(nns.CNAME)), nil
- case "SOA":
- return big.NewInt(int64(nns.SOA)), nil
- case "TXT":
- return big.NewInt(int64(nns.TXT)), nil
- case "AAAA":
- return big.NewInt(int64(nns.AAAA)), nil
- }
- return nil, errors.New("unsupported record type")
-}
-
-func recordTypeToString(rt nns.RecordType) string {
- switch rt {
- case nns.A:
- return "A"
- case nns.CNAME:
- return "CNAME"
- case nns.SOA:
- return "SOA"
- case nns.TXT:
- return "TXT"
- case nns.AAAA:
- return "AAAA"
- }
- return ""
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go
deleted file mode 100644
index 53bd943f0..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package nns
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/spf13/cobra"
-)
-
-func initRenewCmd() {
- Cmd.AddCommand(renewCmd)
- renewCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- renewCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- renewCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
-}
-
-func renewDomain(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- h, vub, err := c.Renew(name)
- commonCmd.ExitOnErr(cmd, "unable to renew domain: %w", err)
-
- cmd.Println("Waiting for transaction to persist...")
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "renew domain error: %w", err)
- cmd.Println("Domain renewed successfully")
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
deleted file mode 100644
index bb84933c6..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package nns
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const (
- nnsNameFlag = "name"
- nnsNameFlagDesc = "Domain name"
- nnsEmailFlag = "email"
- nnsRefreshFlag = "refresh"
- nnsRetryFlag = "retry"
- nnsExpireFlag = "expire"
- nnsTTLFlag = "ttl"
- nnsRecordTypeFlag = "type"
- nnsRecordTypeFlagDesc = "Domain name service record type(A|CNAME|SOA|TXT)"
- nnsRecordDataFlag = "data"
- nnsRecordDataFlagDesc = "Domain name service record data"
-)
-
-var (
- Cmd = &cobra.Command{
- Use: "nns",
- Short: "Section for Neo Name Service (NNS)",
- }
- tokensCmd = &cobra.Command{
- Use: "tokens",
- Short: "List all registered domain names",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: listTokens,
- }
- registerCmd = &cobra.Command{
- Use: "register",
- Short: "Registers a new domain",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
- },
- Run: registerDomain,
- }
- deleteCmd = &cobra.Command{
- Use: "delete",
- Short: "Delete a domain by name",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
- },
- Run: deleteDomain,
- }
- renewCmd = &cobra.Command{
- Use: "renew",
- Short: "Increases domain expiration date",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- },
- Run: renewDomain,
- }
- updateCmd = &cobra.Command{
- Use: "update",
- Short: "Updates soa record",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- },
- Run: updateSOA,
- }
- addRecordCmd = &cobra.Command{
- Use: "add-record",
- Short: "Adds a new record of the specified type to the provided domain",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
- },
- Run: addRecord,
- }
- getRecordsCmd = &cobra.Command{
- Use: "get-records",
- Short: "Returns domain record of the specified type",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: getRecords,
- }
- delRecordsCmd = &cobra.Command{
- Use: "delete-records",
- Short: "Removes domain records with the specified type",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
- },
- Run: delRecords,
- }
- delRecordCmd = &cobra.Command{
- Use: "delete-record",
- Short: "Removes domain record with the specified type and data",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
- },
- Run: delRecord,
- }
- setAdminCmd = &cobra.Command{
- Use: "set-admin",
- Short: "Sets admin for domain",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
- _ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath))
- },
- Run: setAdmin,
- }
-)
-
-func init() {
- initTokensCmd()
- initRegisterCmd()
- initDeleteCmd()
- initRenewCmd()
- initUpdateCmd()
- initAddRecordCmd()
- initGetRecordsCmd()
- initDelRecordsCmd()
- initDelRecordCmd()
- initSetAdminCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
deleted file mode 100644
index 4ccbb1677..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package nns
-
-import (
- "math/big"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/spf13/cobra"
-)
-
-const (
- verboseDesc = "Include additional information about CNAME record."
-)
-
-func initTokensCmd() {
- Cmd.AddCommand(tokensCmd)
- tokensCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- tokensCmd.Flags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, verboseDesc)
-}
-
-func listTokens(cmd *cobra.Command, _ []string) {
- c, _ := nnsReader(cmd)
- it, err := c.Tokens()
- commonCmd.ExitOnErr(cmd, "unable to get tokens: %w", err)
- for toks, err := it.Next(10); err == nil && len(toks) > 0; toks, err = it.Next(10) {
- for _, token := range toks {
- output := string(token)
- if verbose, _ := cmd.Flags().GetBool(commonflags.Verbose); verbose {
- cname, err := getCnameRecord(c, token)
- commonCmd.ExitOnErr(cmd, "", err)
- if cname != "" {
- output += " (CNAME: " + cname + ")"
- }
- }
- cmd.Println(output)
- }
- }
-}
-
-func getCnameRecord(c *client.ContractReader, token []byte) (string, error) {
- items, err := c.GetRecords(string(token), big.NewInt(int64(nns.CNAME)))
-
- // GetRecords returns the error "not an array" if the domain does not contain records.
- if err != nil && strings.Contains(err.Error(), "not an array") {
- return "", nil
- }
-
- if err != nil {
- return "", err
- }
-
- if len(items) == 0 {
- return "", nil
- }
-
- record, err := items[0].TryBytes()
- if err != nil {
- return "", err
- }
-
- return string(record), nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/update.go b/cmd/frostfs-adm/internal/modules/morph/nns/update.go
deleted file mode 100644
index c6d77ead6..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/nns/update.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package nns
-
-import (
- "math/big"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/spf13/cobra"
-)
-
-func initUpdateCmd() {
- Cmd.AddCommand(updateCmd)
- updateCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- updateCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- updateCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
- updateCmd.Flags().String(nnsEmailFlag, constants.FrostfsOpsEmail, "Domain owner email")
- updateCmd.Flags().Int64(nnsRefreshFlag, constants.NNSRefreshDefVal,
- "The number of seconds between update requests from secondary and slave name servers")
- updateCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal,
- "The number of seconds the secondary or slave will wait before retrying when the last attempt has failed")
- updateCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime),
- "The number of seconds a master or slave will wait before considering the data stale "+
- "if it cannot reach the primary name server")
- updateCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal,
- "The number of seconds a domain name is cached locally before expiration and return to authoritative "+
- "nameservers for updated information")
-
- _ = cobra.MarkFlagRequired(updateCmd.Flags(), nnsNameFlag)
-}
-
-func updateSOA(cmd *cobra.Command, _ []string) {
- c, actor := nnsWriter(cmd)
-
- name, _ := cmd.Flags().GetString(nnsNameFlag)
- email, _ := cmd.Flags().GetString(nnsEmailFlag)
- refresh, _ := cmd.Flags().GetInt64(nnsRefreshFlag)
- retry, _ := cmd.Flags().GetInt64(nnsRetryFlag)
- expire, _ := cmd.Flags().GetInt64(nnsExpireFlag)
- ttl, _ := cmd.Flags().GetInt64(nnsTTLFlag)
-
- h, vub, err := c.UpdateSOA(name, email, big.NewInt(refresh),
- big.NewInt(retry), big.NewInt(expire), big.NewInt(ttl))
- commonCmd.ExitOnErr(cmd, "unable to send transaction: %w", err)
-
- cmd.Println("Waiting for transaction to persist...")
- _, err = actor.Wait(h, vub, err)
- commonCmd.ExitOnErr(cmd, "register domain error: %w", err)
- cmd.Println("SOA records updated successfully")
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/node/root.go b/cmd/frostfs-adm/internal/modules/morph/node/root.go
deleted file mode 100644
index 1c38ae8bc..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/node/root.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package node
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var RemoveCmd = &cobra.Command{
- Use: "remove-nodes key1 [key2 [...]]",
- Short: "Remove storage nodes from the netmap",
- Long: `Move nodes to the Offline state in the candidates list and tick an epoch to update the netmap`,
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: RemoveNodesCmd,
-}
-
-func initRemoveNodesCmd() {
- RemoveCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- RemoveCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- RemoveCmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
-}
-
-func init() {
- initRemoveNodesCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go b/cmd/frostfs-adm/internal/modules/morph/notary.go
similarity index 66%
rename from cmd/frostfs-adm/internal/modules/morph/notary/notary.go
rename to cmd/frostfs-adm/internal/modules/morph/notary.go
index 3435926c0..2459f127b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go
+++ b/cmd/frostfs-adm/internal/modules/morph/notary.go
@@ -1,12 +1,10 @@
-package notary
+package morph
import (
- "errors"
"fmt"
"math/big"
+ "strconv"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -22,16 +20,9 @@ import (
"github.com/spf13/viper"
)
-const (
- // defaultNotaryDepositLifetime is an amount of blocks notary deposit stays valid.
- // https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/native/notary.go#L48
- defaultNotaryDepositLifetime = 5760
-
- walletAccountFlag = "account"
- notaryDepositTillFlag = "till"
-)
-
-var errInvalidNotaryDepositLifetime = errors.New("notary deposit lifetime must be a positive integer")
+// defaultNotaryDepositLifetime is an amount of blocks notary deposit stays valid.
+// https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/native/notary.go#L48
+const defaultNotaryDepositLifetime = 5760
func depositNotary(cmd *cobra.Command, _ []string) error {
w, err := openWallet(cmd)
@@ -40,8 +31,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
}
accHash := w.GetChangeAddress()
- addr, _ := cmd.Flags().GetString(walletAccountFlag)
- if addr != "" {
+ if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
accHash, err = address.StringToUint160(addr)
if err != nil {
return fmt.Errorf("invalid address: %s", addr)
@@ -53,7 +43,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't find account for %s", accHash)
}
- prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
+ prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
pass, err := input.ReadPassword(prompt)
if err != nil {
return fmt.Errorf("can't get password: %v", err)
@@ -64,30 +54,37 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't unlock account: %v", err)
}
- gasStr, err := cmd.Flags().GetString(commonflags.RefillGasAmountFlag)
+ gasStr, err := cmd.Flags().GetString(refillGasAmountFlag)
if err != nil {
return err
}
- gasAmount, err := helper.ParseGASAmount(gasStr)
+ gasAmount, err := parseGASAmount(gasStr)
if err != nil {
return err
}
- till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
- if till <= 0 {
- return errInvalidNotaryDepositLifetime
+ till := int64(defaultNotaryDepositLifetime)
+ tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
+ if err != nil {
+ return err
+ }
+ if tillStr != "" {
+ till, err = strconv.ParseInt(tillStr, 10, 64)
+ if err != nil || till <= 0 {
+ return fmt.Errorf("notary deposit lifetime must be a positive integer")
+ }
}
return transferGas(cmd, acc, accHash, gasAmount, till)
}
func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
+ c, err := getN3Client(viper.GetViper())
if err != nil {
return err
}
- if err := helper.CheckNotaryEnabled(c); err != nil {
+ if err := checkNotaryEnabled(c); err != nil {
return err
}
@@ -119,15 +116,15 @@ func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160,
return fmt.Errorf("could not send tx: %w", err)
}
- return helper.AwaitTx(cmd, c, []helper.HashVUBPair{{Hash: txHash, Vub: vub}})
+ return awaitTx(cmd, c, []hashVUBPair{{hash: txHash, vub: vub}})
}
func openWallet(cmd *cobra.Command) (*wallet.Wallet, error) {
- p, err := cmd.Flags().GetString(commonflags.StorageWalletFlag)
+ p, err := cmd.Flags().GetString(storageWalletFlag)
if err != nil {
return nil, err
} else if p == "" {
- return nil, fmt.Errorf("missing wallet path (use '--%s ')", commonflags.StorageWalletFlag)
+ return nil, fmt.Errorf("missing wallet path (use '--%s ')", storageWalletFlag)
}
w, err := wallet.NewWalletFromFile(p)
diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/root.go b/cmd/frostfs-adm/internal/modules/morph/notary/root.go
deleted file mode 100644
index d7be2e503..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/notary/root.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package notary
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var DepositCmd = &cobra.Command{
- Use: "deposit-notary",
- Short: "Deposit GAS for notary service",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: depositNotary,
-}
-
-func initDepositoryNotaryCmd() {
- DepositCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
- DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
- DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
- DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
-}
-
-func init() {
- initDepositoryNotaryCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy.go
new file mode 100644
index 000000000..0703ebc2d
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/policy.go
@@ -0,0 +1,54 @@
+package morph
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/nspcc-dev/neo-go/pkg/io"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/policy"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
+ "github.com/nspcc-dev/neo-go/pkg/vm/emit"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+const (
+ execFeeParam = "ExecFeeFactor"
+ storagePriceParam = "StoragePrice"
+ setFeeParam = "FeePerByte"
+)
+
+func setPolicyCmd(cmd *cobra.Command, args []string) error {
+ wCtx, err := newInitializeContext(cmd, viper.GetViper())
+ if err != nil {
+ return fmt.Errorf("can't to initialize context: %w", err)
+ }
+
+ bw := io.NewBufBinWriter()
+ for i := range args {
+ k, v, found := strings.Cut(args[i], "=")
+ if !found {
+ return fmt.Errorf("invalid parameter format, must be Parameter=Value")
+ }
+
+ switch k {
+ case execFeeParam, storagePriceParam, setFeeParam:
+ default:
+ return fmt.Errorf("parameter must be one of %s, %s and %s", execFeeParam, storagePriceParam, setFeeParam)
+ }
+
+ value, err := strconv.ParseUint(v, 10, 32)
+ if err != nil {
+ return fmt.Errorf("can't parse parameter value '%s': %w", args[1], err)
+ }
+
+ emit.AppCall(bw.BinWriter, policy.Hash, "set"+k, callflag.All, int64(value))
+ }
+
+ if err := wCtx.sendCommitteeTx(bw.Bytes(), false); err != nil {
+ return err
+ }
+
+ return wCtx.awaitTx()
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go
deleted file mode 100644
index f2932e87c..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package policy
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strconv"
- "strings"
- "text/tabwriter"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/policy"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
- "github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const (
- execFeeParam = "ExecFeeFactor"
- storagePriceParam = "StoragePrice"
- setFeeParam = "FeePerByte"
-)
-
-var errInvalidParameterFormat = errors.New("invalid parameter format, must be Parameter=Value")
-
-func SetPolicyCmd(cmd *cobra.Command, args []string) error {
- wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
- if err != nil {
- return fmt.Errorf("can't initialize context: %w", err)
- }
-
- bw := io.NewBufBinWriter()
- for i := range args {
- k, v, found := strings.Cut(args[i], "=")
- if !found {
- return errInvalidParameterFormat
- }
-
- switch k {
- case execFeeParam, storagePriceParam, setFeeParam:
- default:
- return fmt.Errorf("parameter must be one of %s, %s and %s", execFeeParam, storagePriceParam, setFeeParam)
- }
-
- value, err := strconv.ParseUint(v, 10, 32)
- if err != nil {
- return fmt.Errorf("can't parse parameter value '%s': %w", args[i], err)
- }
-
- emit.AppCall(bw.BinWriter, policy.Hash, "set"+k, callflag.All, int64(value))
- }
-
- if err := wCtx.SendCommitteeTx(bw.Bytes(), false); err != nil {
- return err
- }
-
- return wCtx.AwaitTx()
-}
-
-func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
- c, err := helper.NewRemoteClient(viper.GetViper())
- commonCmd.ExitOnErr(cmd, "can't create N3 client:", err)
-
- inv := invoker.New(c, nil)
- policyContract := policy.NewReader(inv)
-
- execFee, err := policyContract.GetExecFeeFactor()
- commonCmd.ExitOnErr(cmd, "can't get execution fee factor:", err)
-
- feePerByte, err := policyContract.GetFeePerByte()
- commonCmd.ExitOnErr(cmd, "can't get fee per byte:", err)
-
- storagePrice, err := policyContract.GetStoragePrice()
- commonCmd.ExitOnErr(cmd, "can't get storage price:", err)
-
- buf := bytes.NewBuffer(nil)
- tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
-
- _, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
- _, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
- _, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))
-
- _ = tw.Flush()
- cmd.Print(buf.String())
-
- return nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/root.go b/cmd/frostfs-adm/internal/modules/morph/policy/root.go
deleted file mode 100644
index a8a356207..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/policy/root.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package policy
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var (
- Set = &cobra.Command{
- Use: "set-policy [ExecFeeFactor=] [StoragePrice=] [FeePerByte=]",
- DisableFlagsInUseLine: true,
- Short: "Set global policy values",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: SetPolicyCmd,
- ValidArgsFunction: func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
- return []string{"ExecFeeFactor=", "StoragePrice=", "FeePerByte="}, cobra.ShellCompDirectiveNoSpace
- },
- }
-
- Dump = &cobra.Command{
- Use: "dump-policy",
- Short: "Dump FrostFS policy",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- RunE: dumpPolicyCmd,
- }
-)
-
-func initSetPolicyCmd() {
- Set.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
- Set.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- Set.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
-}
-
-func initDumpPolicyCmd() {
- Dump.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-}
-
-func init() {
- initSetPolicyCmd()
- initDumpPolicyCmd()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go
deleted file mode 100644
index 24cda45a6..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package proxy
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/nspcc-dev/neo-go/pkg/encoding/address"
- "github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-const (
- accountAddressFlag = "account"
-)
-
-func parseAddresses(cmd *cobra.Command) []util.Uint160 {
- var addrs []util.Uint160
-
- accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
- for _, acc := range accs {
- addr, err := address.StringToUint160(acc)
- commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
-
- addrs = append(addrs, addr)
- }
- return addrs
-}
-
-func addProxyAccount(cmd *cobra.Command, _ []string) {
- addrs := parseAddresses(cmd)
- err := processAccount(cmd, addrs, "addAccount")
- commonCmd.ExitOnErr(cmd, "processing error: %w", err)
-}
-
-func removeProxyAccount(cmd *cobra.Command, _ []string) {
- addrs := parseAddresses(cmd)
- err := processAccount(cmd, addrs, "removeAccount")
- commonCmd.ExitOnErr(cmd, "processing error: %w", err)
-}
-
-func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
- wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
- if err != nil {
- return fmt.Errorf("can't initialize context: %w", err)
- }
-
- r := management.NewReader(wCtx.ReadOnlyInvoker)
- cs, err := helper.GetContractByID(r, 1)
- if err != nil {
- return fmt.Errorf("can't get NNS contract info: %w", err)
- }
-
- proxyHash, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, helper.DomainOf(constants.ProxyContract))
- if err != nil {
- return fmt.Errorf("can't get proxy contract hash: %w", err)
- }
-
- bw := io.NewBufBinWriter()
- for _, addr := range addrs {
- emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
- }
-
- if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
- return err
- }
-
- if err = wCtx.AwaitTx(); err != nil {
- return err
- }
-
- cmd.Println("Proxy contract has been updated")
-
- return nil
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
deleted file mode 100644
index ad89af2b5..000000000
--- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package proxy
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
-)
-
-var (
- AddAccountCmd = &cobra.Command{
- Use: "proxy-add-account",
- Short: "Adds account to proxy contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: addProxyAccount,
- }
- RemoveAccountCmd = &cobra.Command{
- Use: "proxy-remove-account",
- Short: "Remove from proxy contract",
- PreRun: func(cmd *cobra.Command, _ []string) {
- _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
- _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- },
- Run: removeProxyAccount,
- }
-)
-
-func initProxyAddAccount() {
- AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
- _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
- AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-}
-
-func initProxyRemoveAccount() {
- RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
- RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
- _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
- RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-}
-
-func init() {
- initProxyAddAccount()
- initProxyRemoveAccount()
-}
diff --git a/cmd/frostfs-adm/internal/modules/morph/node/remove.go b/cmd/frostfs-adm/internal/modules/morph/remove_node.go
similarity index 59%
rename from cmd/frostfs-adm/internal/modules/morph/node/remove.go
rename to cmd/frostfs-adm/internal/modules/morph/remove_node.go
index e47451e0c..df67433c4 100644
--- a/cmd/frostfs-adm/internal/modules/morph/node/remove.go
+++ b/cmd/frostfs-adm/internal/modules/morph/remove_node.go
@@ -1,22 +1,19 @@
-package node
+package morph
import (
"errors"
"fmt"
netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/io"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
-func RemoveNodesCmd(cmd *cobra.Command, args []string) error {
+func removeNodesCmd(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return errors.New("at least one node key must be provided")
}
@@ -30,19 +27,18 @@ func RemoveNodesCmd(cmd *cobra.Command, args []string) error {
}
}
- wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
+ wCtx, err := newInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
}
- defer wCtx.Close()
+ defer wCtx.close()
- r := management.NewReader(wCtx.ReadOnlyInvoker)
- cs, err := helper.GetContractByID(r, 1)
+ cs, err := wCtx.Client.GetContractStateByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}
- nmHash, err := helper.NNSResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, helper.DomainOf(constants.NetmapContract))
+ nmHash, err := nnsResolveHash(wCtx.ReadOnlyInvoker, cs.Hash, netmapContract+".frostfs")
if err != nil {
return fmt.Errorf("can't get netmap contract hash: %w", err)
}
@@ -53,13 +49,13 @@ func RemoveNodesCmd(cmd *cobra.Command, args []string) error {
int64(netmapcontract.NodeStateOffline), nodeKeys[i].Bytes())
}
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, 1); err != nil {
+ if err := emitNewEpochCall(bw, wCtx, nmHash); err != nil {
return err
}
- if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
+ if err := wCtx.sendConsensusTx(bw.Bytes()); err != nil {
return err
}
- return wCtx.AwaitTx()
+ return wCtx.awaitTx()
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/root.go b/cmd/frostfs-adm/internal/modules/morph/root.go
index e8426d56e..bee1837a3 100644
--- a/cmd/frostfs-adm/internal/modules/morph/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/root.go
@@ -1,54 +1,387 @@
package morph
import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/balance"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/contract"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/frostfsid"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/generate"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/initialize"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/nns"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/node"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/notary"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/policy"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/proxy"
"github.com/spf13/cobra"
+ "github.com/spf13/viper"
)
-// RootCmd is a root command of config section.
-var RootCmd = &cobra.Command{
- Use: "morph",
- Short: "Section for morph network configuration commands",
-}
+const (
+ alphabetWalletsFlag = "alphabet-wallets"
+ alphabetSizeFlag = "size"
+ endpointFlag = "rpc-endpoint"
+ storageWalletFlag = "storage-wallet"
+ storageWalletLabelFlag = "label"
+ storageGasCLIFlag = "initial-gas"
+ storageGasConfigFlag = "storage.initial_gas"
+ contractsInitFlag = "contracts"
+ maxObjectSizeInitFlag = "network.max_object_size"
+ maxObjectSizeCLIFlag = "max-object-size"
+ epochDurationInitFlag = "network.epoch_duration"
+ epochDurationCLIFlag = "epoch-duration"
+ containerFeeInitFlag = "network.fee.container"
+ containerAliasFeeInitFlag = "network.fee.container_alias"
+ containerFeeCLIFlag = "container-fee"
+ containerAliasFeeCLIFlag = "container-alias-fee"
+ candidateFeeInitFlag = "network.fee.candidate"
+ candidateFeeCLIFlag = "candidate-fee"
+ homomorphicHashDisabledInitFlag = "network.homomorphic_hash_disabled"
+ maintenanceModeAllowedInitFlag = "network.maintenance_mode_allowed"
+ homomorphicHashDisabledCLIFlag = "homomorphic-disabled"
+ withdrawFeeInitFlag = "network.fee.withdraw"
+ withdrawFeeCLIFlag = "withdraw-fee"
+ containerDumpFlag = "dump"
+ containerContractFlag = "container-contract"
+ containerIDsFlag = "cid"
+ refillGasAmountFlag = "gas"
+ walletAccountFlag = "account"
+ notaryDepositTillFlag = "till"
+ localDumpFlag = "local-dump"
+ protoConfigPath = "protocol"
+ walletAddressFlag = "wallet-address"
+)
+
+var (
+ // RootCmd is a root command of config section.
+ RootCmd = &cobra.Command{
+ Use: "morph",
+ Short: "Section for morph network configuration commands",
+ }
+
+ generateAlphabetCmd = &cobra.Command{
+ Use: "generate-alphabet",
+ Short: "Generate alphabet wallets for consensus nodes of the morph network",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ // PreRun fixes https://github.com/spf13/viper/issues/233
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ },
+ RunE: generateAlphabetCreds,
+ }
+
+ initCmd = &cobra.Command{
+ Use: "init",
+ Short: "Initialize side chain network with smart-contracts and network settings",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ _ = viper.BindPFlag(epochDurationInitFlag, cmd.Flags().Lookup(epochDurationCLIFlag))
+ _ = viper.BindPFlag(maxObjectSizeInitFlag, cmd.Flags().Lookup(maxObjectSizeCLIFlag))
+ _ = viper.BindPFlag(homomorphicHashDisabledInitFlag, cmd.Flags().Lookup(homomorphicHashDisabledCLIFlag))
+ _ = viper.BindPFlag(candidateFeeInitFlag, cmd.Flags().Lookup(candidateFeeCLIFlag))
+ _ = viper.BindPFlag(containerFeeInitFlag, cmd.Flags().Lookup(containerFeeCLIFlag))
+ _ = viper.BindPFlag(containerAliasFeeInitFlag, cmd.Flags().Lookup(containerAliasFeeCLIFlag))
+ _ = viper.BindPFlag(withdrawFeeInitFlag, cmd.Flags().Lookup(withdrawFeeCLIFlag))
+ _ = viper.BindPFlag(protoConfigPath, cmd.Flags().Lookup(protoConfigPath))
+ },
+ RunE: initializeSideChainCmd,
+ }
+
+ generateStorageCmd = &cobra.Command{
+ Use: "generate-storage-wallet",
+ Short: "Generate storage node wallet for the morph network",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ _ = viper.BindPFlag(storageGasConfigFlag, cmd.Flags().Lookup(storageGasCLIFlag))
+ },
+ RunE: generateStorageCreds,
+ }
+
+ refillGasCmd = &cobra.Command{
+ Use: "refill-gas",
+ Short: "Refill GAS of storage node's wallet in the morph network",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ _ = viper.BindPFlag(refillGasAmountFlag, cmd.Flags().Lookup(refillGasAmountFlag))
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return refillGas(cmd, refillGasAmountFlag, false)
+ },
+ }
+
+ forceNewEpoch = &cobra.Command{
+ Use: "force-new-epoch",
+ Short: "Create new FrostFS epoch event in the side chain",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: forceNewEpochCmd,
+ }
+
+ removeNodes = &cobra.Command{
+ Use: "remove-nodes key1 [key2 [...]]",
+ Short: "Remove storage nodes from the netmap",
+ Long: `Move nodes to the Offline state in the candidates list and tick an epoch to update the netmap`,
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: removeNodesCmd,
+ }
+
+ setConfig = &cobra.Command{
+ Use: "set-config key1=val1 [key2=val2 ...]",
+ DisableFlagsInUseLine: true,
+ Short: "Add/update global config value in the FrostFS network",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ Args: cobra.MinimumNArgs(1),
+ RunE: setConfigCmd,
+ }
+
+ setPolicy = &cobra.Command{
+ Use: "set-policy [ExecFeeFactor=] [StoragePrice=] [FeePerByte=]",
+ DisableFlagsInUseLine: true,
+ Short: "Set global policy values",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: setPolicyCmd,
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"ExecFeeFactor=", "StoragePrice=", "FeePerByte="}, cobra.ShellCompDirectiveNoSpace
+ },
+ }
+
+ dumpContractHashesCmd = &cobra.Command{
+ Use: "dump-hashes",
+ Short: "Dump deployed contract hashes",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: dumpContractHashes,
+ }
+
+ dumpNetworkConfigCmd = &cobra.Command{
+ Use: "dump-config",
+ Short: "Dump FrostFS network config",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: dumpNetworkConfig,
+ }
+
+ dumpBalancesCmd = &cobra.Command{
+ Use: "dump-balances",
+ Short: "Dump GAS balances",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: dumpBalances,
+ }
+
+ updateContractsCmd = &cobra.Command{
+ Use: "update-contracts",
+ Short: "Update FrostFS contracts",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: updateContracts,
+ }
+
+ dumpContainersCmd = &cobra.Command{
+ Use: "dump-containers",
+ Short: "Dump FrostFS containers to file",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: dumpContainers,
+ }
+
+ restoreContainersCmd = &cobra.Command{
+ Use: "restore-containers",
+ Short: "Restore FrostFS containers from file",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: restoreContainers,
+ }
+
+ listContainersCmd = &cobra.Command{
+ Use: "list-containers",
+ Short: "List FrostFS containers",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: listContainers,
+ }
+
+ depositNotaryCmd = &cobra.Command{
+ Use: "deposit-notary",
+ Short: "Deposit GAS for notary service",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ },
+ RunE: depositNotary,
+ }
+
+ netmapCandidatesCmd = &cobra.Command{
+ Use: "netmap-candidates",
+ Short: "List netmap candidates nodes",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
+ _ = viper.BindPFlag(alphabetWalletsFlag, cmd.Flags().Lookup(alphabetWalletsFlag))
+ },
+ Run: listNetmapCandidatesNodes,
+ }
+)
func init() {
- RootCmd.AddCommand(generate.RefillGasCmd)
- RootCmd.AddCommand(initialize.Cmd)
- RootCmd.AddCommand(contract.DeployCmd)
- RootCmd.AddCommand(generate.GenerateStorageCmd)
- RootCmd.AddCommand(netmap.ForceNewEpoch)
- RootCmd.AddCommand(node.RemoveCmd)
- RootCmd.AddCommand(policy.Set)
- RootCmd.AddCommand(policy.Dump)
- RootCmd.AddCommand(contract.DumpHashesCmd)
- RootCmd.AddCommand(config.SetCmd)
- RootCmd.AddCommand(config.DumpCmd)
- RootCmd.AddCommand(balance.DumpCmd)
- RootCmd.AddCommand(contract.UpdateCmd)
- RootCmd.AddCommand(container.ListCmd)
- RootCmd.AddCommand(container.RestoreCmd)
- RootCmd.AddCommand(container.DumpCmd)
- RootCmd.AddCommand(generate.GenerateAlphabetCmd)
- RootCmd.AddCommand(notary.DepositCmd)
- RootCmd.AddCommand(netmap.CandidatesCmd)
-
- RootCmd.AddCommand(ape.Cmd)
- RootCmd.AddCommand(proxy.AddAccountCmd)
- RootCmd.AddCommand(proxy.RemoveAccountCmd)
-
- RootCmd.AddCommand(frostfsid.Cmd)
- RootCmd.AddCommand(nns.Cmd)
+ initGenerateAlphabetCmd()
+ initInitCmd()
+ initDeployCmd()
+ initGenerateStorageCmd()
+ initForceNewEpochCmd()
+ initRemoveNodesCmd()
+ initSetPolicyCmd()
+ initDumpContractHashesCmd()
+ initDumpNetworkConfigCmd()
+ initSetConfigCmd()
+ initDumpBalancesCmd()
+ initUpdateContractsCmd()
+ initDumpContainersCmd()
+ initRestoreContainersCmd()
+ initListContainersCmd()
+ initRefillGasCmd()
+ initDepositoryNotaryCmd()
+ initNetmapCandidatesCmd()
+}
+
+func initNetmapCandidatesCmd() {
+ RootCmd.AddCommand(netmapCandidatesCmd)
+ netmapCandidatesCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+}
+
+func initDepositoryNotaryCmd() {
+ RootCmd.AddCommand(depositNotaryCmd)
+ depositNotaryCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ depositNotaryCmd.Flags().String(storageWalletFlag, "", "Path to storage node wallet")
+ depositNotaryCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
+ depositNotaryCmd.Flags().String(refillGasAmountFlag, "", "Amount of GAS to deposit")
+ depositNotaryCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
+}
+
+func initRefillGasCmd() {
+ RootCmd.AddCommand(refillGasCmd)
+ refillGasCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ refillGasCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ refillGasCmd.Flags().String(storageWalletFlag, "", "Path to storage node wallet")
+ refillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
+ refillGasCmd.Flags().String(refillGasAmountFlag, "", "Additional amount of GAS to transfer")
+ refillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, storageWalletFlag)
+}
+
+func initListContainersCmd() {
+ RootCmd.AddCommand(listContainersCmd)
+ listContainersCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ listContainersCmd.Flags().String(containerContractFlag, "", "Container contract hash (for networks without NNS)")
+}
+
+func initRestoreContainersCmd() {
+ RootCmd.AddCommand(restoreContainersCmd)
+ restoreContainersCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ restoreContainersCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ restoreContainersCmd.Flags().String(containerDumpFlag, "", "File to restore containers from")
+ restoreContainersCmd.Flags().StringSlice(containerIDsFlag, nil, "Containers to restore")
+}
+
+func initDumpContainersCmd() {
+ RootCmd.AddCommand(dumpContainersCmd)
+ dumpContainersCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ dumpContainersCmd.Flags().String(containerDumpFlag, "", "File where to save dumped containers")
+ dumpContainersCmd.Flags().String(containerContractFlag, "", "Container contract hash (for networks without NNS)")
+ dumpContainersCmd.Flags().StringSlice(containerIDsFlag, nil, "Containers to dump")
+}
+
+func initUpdateContractsCmd() {
+ RootCmd.AddCommand(updateContractsCmd)
+ updateContractsCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ updateContractsCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ updateContractsCmd.Flags().String(contractsInitFlag, "", "Path to archive with compiled FrostFS contracts")
+ _ = updateContractsCmd.MarkFlagRequired(contractsInitFlag)
+}
+
+func initDumpBalancesCmd() {
+ RootCmd.AddCommand(dumpBalancesCmd)
+ dumpBalancesCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ dumpBalancesCmd.Flags().BoolP(dumpBalancesStorageFlag, "s", false, "Dump balances of storage nodes from the current netmap")
+ dumpBalancesCmd.Flags().BoolP(dumpBalancesAlphabetFlag, "a", false, "Dump balances of alphabet contracts")
+ dumpBalancesCmd.Flags().BoolP(dumpBalancesProxyFlag, "p", false, "Dump balances of the proxy contract")
+ dumpBalancesCmd.Flags().Bool(dumpBalancesUseScriptHashFlag, false, "Use script-hash format for addresses")
+}
+
+func initSetConfigCmd() {
+ RootCmd.AddCommand(setConfig)
+ setConfig.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ setConfig.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ setConfig.Flags().Bool(forceConfigSet, false, "Force setting not well-known configuration key")
+}
+
+func initDumpNetworkConfigCmd() {
+ RootCmd.AddCommand(dumpNetworkConfigCmd)
+ dumpNetworkConfigCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+}
+
+func initDumpContractHashesCmd() {
+ RootCmd.AddCommand(dumpContractHashesCmd)
+ dumpContractHashesCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ dumpContractHashesCmd.Flags().String(customZoneFlag, "", "Custom zone to search.")
+}
+
+func initSetPolicyCmd() {
+ RootCmd.AddCommand(setPolicy)
+ setPolicy.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ setPolicy.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+}
+
+func initRemoveNodesCmd() {
+ RootCmd.AddCommand(removeNodes)
+ removeNodes.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ removeNodes.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+}
+
+func initForceNewEpochCmd() {
+ RootCmd.AddCommand(forceNewEpoch)
+ forceNewEpoch.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ forceNewEpoch.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+}
+
+func initGenerateStorageCmd() {
+ RootCmd.AddCommand(generateStorageCmd)
+ generateStorageCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ generateStorageCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ generateStorageCmd.Flags().String(storageWalletFlag, "", "Path to new storage node wallet")
+ generateStorageCmd.Flags().String(storageGasCLIFlag, "", "Initial amount of GAS to transfer")
+ generateStorageCmd.Flags().StringP(storageWalletLabelFlag, "l", "", "Wallet label")
+}
+
+func initInitCmd() {
+ RootCmd.AddCommand(initCmd)
+ initCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ initCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
+ initCmd.Flags().String(contractsInitFlag, "", "Path to archive with compiled FrostFS contracts")
+ _ = initCmd.MarkFlagRequired(contractsInitFlag)
+ initCmd.Flags().Uint(epochDurationCLIFlag, 240, "Amount of side chain blocks in one FrostFS epoch")
+ initCmd.Flags().Uint(maxObjectSizeCLIFlag, 67108864, "Max single object size in bytes")
+ initCmd.Flags().Bool(homomorphicHashDisabledCLIFlag, false, "Disable object homomorphic hashing")
+ // Defaults are taken from neo-preodolenie.
+ initCmd.Flags().Uint64(containerFeeCLIFlag, 1000, "Container registration fee")
+ initCmd.Flags().Uint64(containerAliasFeeCLIFlag, 500, "Container alias fee")
+ initCmd.Flags().String(protoConfigPath, "", "Path to the consensus node configuration")
+ initCmd.Flags().String(localDumpFlag, "", "Path to the blocks dump file")
+}
+
+func initGenerateAlphabetCmd() {
+ RootCmd.AddCommand(generateAlphabetCmd)
+ generateAlphabetCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
+ generateAlphabetCmd.Flags().Uint(alphabetSizeFlag, 7, "Amount of alphabet wallets to generate")
+}
+
+func initDeployCmd() {
+ RootCmd.AddCommand(deployCmd)
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/update.go b/cmd/frostfs-adm/internal/modules/morph/update.go
new file mode 100644
index 000000000..90b6d6558
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/update.go
@@ -0,0 +1,21 @@
+package morph
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+func updateContracts(cmd *cobra.Command, _ []string) error {
+ wCtx, err := newInitializeContext(cmd, viper.GetViper())
+ if err != nil {
+ return fmt.Errorf("initialization error: %w", err)
+ }
+
+ if err := wCtx.deployNNS(updateMethodName); err != nil {
+ return err
+ }
+
+ return wCtx.updateContracts()
+}
diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go
index cc8225c7a..0fa0f7f69 100644
--- a/cmd/frostfs-adm/internal/modules/root.go
+++ b/cmd/frostfs-adm/internal/modules/root.go
@@ -5,9 +5,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
@@ -16,14 +15,16 @@ import (
"github.com/spf13/viper"
)
-var rootCmd = &cobra.Command{
- Use: "frostfs-adm",
- Short: "FrostFS Administrative Tool",
- Long: `FrostFS Administrative Tool provides functions to setup and
+var (
+ rootCmd = &cobra.Command{
+ Use: "frostfs-adm",
+ Short: "FrostFS Administrative Tool",
+ Long: `FrostFS Administrative Tool provides functions to setup and
manage FrostFS network deployment.`,
- RunE: entryPoint,
- SilenceUsage: true,
-}
+ RunE: entryPoint,
+ SilenceUsage: true,
+ }
+)
func init() {
cobra.OnInitialize(func() { initConfig(rootCmd) })
@@ -41,8 +42,7 @@ func init() {
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
- rootCmd.AddCommand(metabase.RootCmd)
- rootCmd.AddCommand(maintenance.RootCmd)
+ rootCmd.AddCommand(storagecfg.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/config.go b/cmd/frostfs-adm/internal/modules/storagecfg/config.go
new file mode 100644
index 000000000..a07ce32c6
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/storagecfg/config.go
@@ -0,0 +1,135 @@
+package storagecfg
+
+const configTemplate = `logger:
+ level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
+
+node:
+ wallet:
+ path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
+ address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
+ password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
+ addresses: # list of addresses announced by Storage node in the Network map
+ - {{ .AnnouncedAddress }}
+ attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
+ relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
+
+grpc:
+ num: 1 # total number of listener endpoints
+ 0:
+ endpoint: {{ .Endpoint }} # endpoint for gRPC server
+ tls:{{if .TLSCert}}
+ enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
+ certificate: {{ .TLSCert }} # path to TLS certificate
+ key: {{ .TLSKey }} # path to TLS key
+ {{- else }}
+ enabled: false # disable TLS for a gRPC connection
+ {{- end}}
+
+control:
+ authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
+ {{- range .AuthorizedKeys }}
+ - {{.}}{{end}}
+ grpc:
+ endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service
+
+morph:
+ dial_timeout: 20s # timeout for side chain NEO RPC client connection
+ cache_ttl: 15s # use TTL cache for side chain GET operations
+ rpc_endpoint: # side chain N3 RPC endpoints
+ {{- range .MorphRPC }}
+ - address: wss://{{.}}/ws{{end}}
+{{if not .Relay }}
+storage:
+ shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
+
+ shard:
+ default: # section with the default shard parameters
+ metabase:
+ perm: 0644 # permissions for metabase files (directories: +x for current user and group)
+
+ blobstor:
+ perm: 0644 # permissions for blobstor files (directories: +x for current user and group)
+ depth: 2 # max depth of object tree storage in FS
+ small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
+ compress: true # turn on/off Zstandard compression (level 3) of stored objects
+ compression_exclude_content_types:
+ - audio/*
+ - video/*
+
+ blobovnicza:
+ size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
+ depth: 1 # max depth of object tree storage in key-value DB
+ width: 4 # max width of object tree storage in key-value DB
+ opened_cache_capacity: 50 # maximum number of opened database files
+
+ gc:
+ remover_batch_size: 200 # number of objects to be removed by the garbage collector
+ remover_sleep_interval: 5m # frequency of the garbage collector invocation
+ 0:
+ mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
+
+ metabase:
+ path: {{ .MetabasePath }} # path to the metabase
+
+ blobstor:
+ path: {{ .BlobstorPath }} # path to the blobstor
+{{end}}`
+
+const (
+ neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221"
+ balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
+ neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1"
+ balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
+)
+
+var n3config = map[string]struct {
+ MorphRPC []string
+ RPC []string
+ NeoFSContract string
+ BalanceContract string
+}{
+ "testnet": {
+ MorphRPC: []string{
+ "rpc01.morph.testnet.fs.neo.org:51331",
+ "rpc02.morph.testnet.fs.neo.org:51331",
+ "rpc03.morph.testnet.fs.neo.org:51331",
+ "rpc04.morph.testnet.fs.neo.org:51331",
+ "rpc05.morph.testnet.fs.neo.org:51331",
+ "rpc06.morph.testnet.fs.neo.org:51331",
+ "rpc07.morph.testnet.fs.neo.org:51331",
+ },
+ RPC: []string{
+ "rpc01.testnet.n3.nspcc.ru:21331",
+ "rpc02.testnet.n3.nspcc.ru:21331",
+ "rpc03.testnet.n3.nspcc.ru:21331",
+ "rpc04.testnet.n3.nspcc.ru:21331",
+ "rpc05.testnet.n3.nspcc.ru:21331",
+ "rpc06.testnet.n3.nspcc.ru:21331",
+ "rpc07.testnet.n3.nspcc.ru:21331",
+ },
+ NeoFSContract: neofsTestnetAddress,
+ BalanceContract: balanceTestnetAddress,
+ },
+ "mainnet": {
+ MorphRPC: []string{
+ "rpc1.morph.fs.neo.org:40341",
+ "rpc2.morph.fs.neo.org:40341",
+ "rpc3.morph.fs.neo.org:40341",
+ "rpc4.morph.fs.neo.org:40341",
+ "rpc5.morph.fs.neo.org:40341",
+ "rpc6.morph.fs.neo.org:40341",
+ "rpc7.morph.fs.neo.org:40341",
+ },
+ RPC: []string{
+ "rpc1.n3.nspcc.ru:10331",
+ "rpc2.n3.nspcc.ru:10331",
+ "rpc3.n3.nspcc.ru:10331",
+ "rpc4.n3.nspcc.ru:10331",
+ "rpc5.n3.nspcc.ru:10331",
+ "rpc6.n3.nspcc.ru:10331",
+ "rpc7.n3.nspcc.ru:10331",
+ },
+ NeoFSContract: neofsMainnetAddress,
+ BalanceContract: balanceMainnetAddress,
+ },
+}
diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go
new file mode 100644
index 000000000..90edf7d75
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/storagecfg/root.go
@@ -0,0 +1,433 @@
+package storagecfg
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math/rand"
+ "net"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+
+ netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "github.com/chzyer/readline"
+ "github.com/nspcc-dev/neo-go/cli/flags"
+ "github.com/nspcc-dev/neo-go/cli/input"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/wallet"
+
+ "github.com/spf13/cobra"
+)
+
+const (
+ walletFlag = "wallet"
+ accountFlag = "account"
+)
+
+const (
+ defaultControlEndpoint = "localhost:8090"
+ defaultDataEndpoint = "localhost"
+)
+
+// RootCmd is a root command of config section.
+var RootCmd = &cobra.Command{
+ Use: "storage-config [-w wallet] [-a account] []",
+ Short: "Section for storage node configuration commands",
+ Run: storageConfig,
+}
+
+func init() {
+ fs := RootCmd.Flags()
+
+ fs.StringP(walletFlag, "w", "", "Path to wallet")
+ fs.StringP(accountFlag, "a", "", "Wallet account")
+}
+
+type config struct {
+ AnnouncedAddress string
+ AuthorizedKeys []string
+ ControlEndpoint string
+ Endpoint string
+ TLSCert string
+ TLSKey string
+ MorphRPC []string
+ Attribute struct {
+ Locode string
+ }
+ Wallet struct {
+ Path string
+ Account string
+ Password string
+ }
+ Relay bool
+ BlobstorPath string
+ MetabasePath string
+}
+
+func storageConfig(cmd *cobra.Command, args []string) {
+ outPath := getOutputPath(args)
+
+ historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
+ readline.SetHistoryPath(historyPath)
+
+ var c config
+
+ c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
+ if c.Wallet.Path == "" {
+ c.Wallet.Path = getPath("Path to the storage node wallet: ")
+ }
+
+ w, err := wallet.NewWalletFromFile(c.Wallet.Path)
+ fatalOnErr(err)
+
+ fillWalletAccount(cmd, &c, w)
+
+ accH, err := flags.ParseAddress(c.Wallet.Account)
+ fatalOnErr(err)
+
+ acc := w.GetAccount(accH)
+ if acc == nil {
+ fatalOnErr(errors.New("can't find account in wallet"))
+ }
+
+ c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
+ fatalOnErr(err)
+
+ err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
+ fatalOnErr(err)
+
+ c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))
+
+ network := readNetwork(cmd)
+
+ c.MorphRPC = n3config[network].MorphRPC
+
+ depositGas(cmd, acc, network)
+
+ c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")
+
+ endpoint := getDefaultEndpoint(cmd, &c)
+ c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
+ if c.Endpoint == "" {
+ c.Endpoint = endpoint
+ }
+
+ c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
+ if c.ControlEndpoint == "" {
+ c.ControlEndpoint = defaultControlEndpoint
+ }
+
+ c.TLSCert = getPath("TLS Certificate (optional): ")
+ if c.TLSCert != "" {
+ c.TLSKey = getPath("TLS Key: ")
+ }
+
+ c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
+ if !c.Relay {
+ p := getPath("Path to the storage directory (all available storage will be used): ")
+ c.BlobstorPath = filepath.Join(p, "blob")
+ c.MetabasePath = filepath.Join(p, "meta")
+ }
+
+ out := applyTemplate(c)
+ fatalOnErr(os.WriteFile(outPath, out, 0644))
+
+ cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
+}
+
+func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
+ var addr, port string
+ for {
+ c.AnnouncedAddress = getString("Publicly announced address: ")
+ validator := netutil.Address{}
+ err := validator.FromString(c.AnnouncedAddress)
+ if err != nil {
+ cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
+ continue
+ }
+ uriAddr, err := url.Parse(validator.URIAddr())
+ if err != nil {
+ panic(fmt.Errorf("unexpected error: %w", err))
+ }
+ addr = uriAddr.Hostname()
+ port = uriAddr.Port()
+ ip, err := net.ResolveIPAddr("ip", addr)
+ if err != nil {
+ cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
+ continue
+ }
+
+ if !ip.IP.IsGlobalUnicast() {
+ cmd.Println("IP must be global unicast.")
+ continue
+ }
+ cmd.Printf("Resolved IP address: %s\n", ip.String())
+
+ _, err = strconv.ParseUint(port, 10, 16)
+ if err != nil {
+ cmd.Println("Port must be an integer.")
+ continue
+ }
+
+ break
+ }
+ return net.JoinHostPort(defaultDataEndpoint, port)
+}
+
+func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
+ c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
+ if c.Wallet.Account == "" {
+ addr := address.Uint160ToString(w.GetChangeAddress())
+ c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
+ if c.Wallet.Account == "" {
+ c.Wallet.Account = addr
+ }
+ }
+}
+
+func readNetwork(cmd *cobra.Command) string {
+ var network string
+ for {
+ network = getString("Choose network [mainnet]/testnet: ")
+ switch network {
+ case "":
+ network = "mainnet"
+ case "testnet", "mainnet":
+ default:
+ cmd.Println(`Network must be either "mainnet" or "testnet"`)
+ continue
+ }
+ break
+ }
+ return network
+}
+
+func getOutputPath(args []string) string {
+ if len(args) != 0 {
+ return args[0]
+ }
+ outPath := getPath("File to write config at [./config.yml]: ")
+ if outPath == "" {
+ outPath = "./config.yml"
+ }
+ return outPath
+}
+
+func getWalletAccount(w *wallet.Wallet, prompt string) string {
+ addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
+ for i := range w.Accounts {
+ addrs[i] = readline.PcItem(w.Accounts[i].Address)
+ }
+
+ readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
+ defer readline.SetAutoComplete(nil)
+
+ s, err := readline.Line(prompt)
+ fatalOnErr(err)
+ return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
+}
+
+func getString(prompt string) string {
+ s, err := readline.Line(prompt)
+ fatalOnErr(err)
+ if s != "" {
+ _ = readline.AddHistory(s)
+ }
+ return s
+}
+
+type filenameCompleter struct{}
+
+func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
+ prefix := string(line[:pos])
+ dir := filepath.Dir(prefix)
+ de, err := os.ReadDir(dir)
+ if err != nil {
+ return nil, 0
+ }
+
+ for i := range de {
+ name := filepath.Join(dir, de[i].Name())
+ if strings.HasPrefix(name, prefix) {
+ tail := []rune(strings.TrimPrefix(name, prefix))
+ if de[i].IsDir() {
+ tail = append(tail, filepath.Separator)
+ }
+ newLine = append(newLine, tail)
+ }
+ }
+ if pos != 0 {
+ return newLine, pos - len([]rune(dir))
+ }
+ return newLine, 0
+}
+
+func getPath(prompt string) string {
+ readline.SetAutoComplete(filenameCompleter{})
+ defer readline.SetAutoComplete(nil)
+
+ p, err := readline.Line(prompt)
+ fatalOnErr(err)
+
+ if p == "" {
+ return p
+ }
+
+ _ = readline.AddHistory(p)
+
+ abs, err := filepath.Abs(p)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
+ }
+
+ return abs
+}
+
+func getConfirmation(def bool, prompt string) bool {
+ for {
+ s, err := readline.Line(prompt)
+ fatalOnErr(err)
+
+ switch strings.ToLower(s) {
+ case "y", "yes":
+ return true
+ case "n", "no":
+ return false
+ default:
+ if len(s) == 0 {
+ return def
+ }
+ }
+ }
+}
+
+func applyTemplate(c config) []byte {
+ tmpl, err := template.New("config").Parse(configTemplate)
+ fatalOnErr(err)
+
+ b := bytes.NewBuffer(nil)
+ fatalOnErr(tmpl.Execute(b, c))
+
+ return b.Bytes()
+}
+
+func fatalOnErr(err error) {
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+ os.Exit(1)
+ }
+}
+
+func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
+ sideClient := initClient(n3config[network].MorphRPC)
+ balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)
+
+ sideActor, err := actor.NewSimple(sideClient, acc)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
+ }
+
+ sideGas := nep17.NewReader(sideActor, balanceHash)
+ accSH := acc.Contract.ScriptHash()
+
+ balance, err := sideGas.BalanceOf(accSH)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("side chain balance: %w", err))
+ }
+
+ ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
+ fixedn.ToString(balance, 12)))
+ if !ok {
+ return
+ }
+
+ amountStr := getString("Enter amount in GAS: ")
+ amount, err := fixedn.FromString(amountStr, 8)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("invalid amount: %w", err))
+ }
+
+ mainClient := initClient(n3config[network].RPC)
+ neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)
+
+ mainActor, err := actor.NewSimple(mainClient, acc)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
+ }
+
+ mainGas := nep17.New(mainActor, gas.Hash)
+
+ txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
+ if err != nil {
+ fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
+ }
+
+ cmd.Print("Waiting for transactions to persist.")
+ tick := time.NewTicker(time.Second / 2)
+ defer tick.Stop()
+
+ timer := time.NewTimer(time.Second * 20)
+ defer timer.Stop()
+
+ at := trigger.Application
+
+loop:
+ for {
+ select {
+ case <-tick.C:
+ _, err := mainClient.GetApplicationLog(txHash, &at)
+ if err == nil {
+ cmd.Print("\n")
+ break loop
+ }
+ cmd.Print(".")
+ case <-timer.C:
+ cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
+ if getConfirmation(false, "Continue configuration? yes/[no]: ") {
+ return
+ }
+ os.Exit(1)
+ }
+ }
+}
+
+func initClient(rpc []string) *rpcclient.Client {
+ var c *rpcclient.Client
+ var err error
+
+ shuffled := make([]string, len(rpc))
+ copy(shuffled, rpc)
+ rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
+
+ for _, endpoint := range shuffled {
+ c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
+ DialTimeout: time.Second * 2,
+ RequestTimeout: time.Second * 5,
+ })
+ if err != nil {
+ continue
+ }
+ if err = c.Init(); err != nil {
+ continue
+ }
+ return c
+ }
+
+ fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
+ panic("unreachable")
+}
diff --git a/cmd/frostfs-cli/docs/policy.md b/cmd/frostfs-cli/docs/policy.md
deleted file mode 100644
index 1d51818ac..000000000
--- a/cmd/frostfs-cli/docs/policy.md
+++ /dev/null
@@ -1,119 +0,0 @@
-# How manage local Access Policy Engine (APE) override of the node
-
-## Overview
-APE is a replacement for eACL. Each rule can restrict somehow access to the object/container or list of them.
-Here is a simple representation for the rule:
-`[:status_detail] ... ... ...`
-
-Rule start with `status`(with or without details), contains list of actions(which this rule regulate) or conditions
-(which can be under resource or request) and ends with list of resources.
-
-Resource is the combination of namespace, identificator of the FrostFS container/object and wildcard `*`.
-
-For object it can be represented as:
-- `namespace/cid/oid` object in the container of the namespace
-- `namespace/cid/*` all objects in the container of the namespace
-- `namespace/*` all objects in the namespace
-- `*` all objects
-- `/*` all object in the `root` namespace
-- `/cid/*` all objects in the container of the `root` namespace
-- `/cid/oid` object in the container of the `root` namespace
-
-For container it can be represented as:
-- `namespace/cid` container in the namespace
-- `namespace/*` all containers in the namespace
-- `*` all containers
-- `/cid` container in the `root` namespace
-- `/*` all containers in the `root` namespace
-
-Actions is a regular operations upon FrostFS containers/objects. Like `Object.Put`, `Container.Get` etc.
-You can use `Object.*`, `Container.*` that implies all actions.
-
-In status section it is possible to use `allow`, `deny` or `deny:QuotaLimitReached` actions.
-
-If a statement does not contain lexeme `any`, field `Any` is set to `false` by default. Otherwise, it is set
-to `true`. Optionally, `all` can be used - it also sets `Any=false`.
-
-It is prohibited to mix operation under FrostFS container and object in one rule.
-The same statement is equal for conditions and resources - one rule is for one type of items.
-
-## Add rule
-Local rule can be added with the command `frostfs-cli control add-rule`:
-```shell
-@:~$ frostfs-cli control add-rule --endpoint s04.frostfs.devenv:8081 -c cnt_create_cfg.yml \
---address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM --cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH \
---chain-id TestPolicy \
---rule "allow Object.Get Object.Head /*" --rule "deny Container.Put *"
-Parsed chain:
-Chain ID: TestPolicy
- HEX: 54657374506f6c696379
-Rules:
-
- Status: Allowed
- Any: false
- Conditions:
- Actions: Inverted:false
- GetObject
- HeadObject
- Resources: Inverted:false
- native:object//*
-
- Status: Access denied
- Any: false
- Conditions:
- Actions: Inverted:false
- PutContainer
- Resources: Inverted:false
- native:container/*
-
-Rule has been added.
-@:~$
-```
-## List rules
-Local rules can be listed with command `frostfs-cli control list-rules`:
-```shell
-@:~$ frostfs-cli control list-rules --endpoint s04.frostfs.devenv:8081 --address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM \
---cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH -w wallets/wallet.json
-Enter password >
-Chain ID: TestPolicy
- HEX: 54657374506f6c696379
-Rules:
-
- Status: Allowed
- Any: false
-...
-@:~$
-```
-
-## Get rule
-Rules can be retrieved with `frostfs-cli control get-rule`:
-```shell
-@:~$ frostfs-cli control get-rule --endpoint s04.frostfs.devenv:8081 -c cnt_create_cfg.yml \
---address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM --cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH \
---chain-id TestPolicy
-Parsed chain (chain id hex: '54657374506f6c696379'):
-Chain ID: TestPolicy
- HEX: 54657374506f6c696379
-Rules:
-
- Status: Allowed
- Any: false
-...
-@:~$
-```
-
-## Remove rule
-To remove rule need to use command `frostfs-cli control remove-rule`:
-```shell
-@:~$ frostfs-cli control remove-rule --endpoint s04.frostfs.devenv:8081 -c cnt_create_cfg.yml \
---address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM --cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH --chain-id TestPolicy
-Rule has been removed.
-@:~$ frostfs-cli control get-rule --endpoint s04.frostfs.devenv:8081 -c cnt_create_cfg.yml \
---address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM --cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH --chain-id TestPolicy
-rpc error: rpc error: code = NotFound desc = chain not found
-@:~$ frostfs-cli control list-rules --endpoint s04.frostfs.devenv:8081 \
---address NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM --cid SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH -w wallets/wallet.json
-Enter password >
-Local overrides are not defined for the container.
-@:~$
-```
diff --git a/cmd/frostfs-cli/docs/sessions.md b/cmd/frostfs-cli/docs/sessions.md
index 52c0e9b9b..04563b7af 100644
--- a/cmd/frostfs-cli/docs/sessions.md
+++ b/cmd/frostfs-cli/docs/sessions.md
@@ -72,3 +72,4 @@ All other `object` sub-commands support only static sessions (2).
List of commands supporting sessions (static only):
- `create`
- `delete`
+- `set-eacl`
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index 299d0a830..9cb7c8161 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -2,27 +2,25 @@ package internal
import (
"bytes"
- "cmp"
"context"
"errors"
"fmt"
"io"
- "os"
- "slices"
+ "sort"
+ "strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)
-var errMissingHeaderInResponse = errors.New("missing header in response")
-
// BalanceOfPrm groups parameters of BalanceOf operation.
type BalanceOfPrm struct {
commonPrm
@@ -76,29 +74,13 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers()
- slices.SortFunc(list, cid.ID.Cmp)
+ sort.Slice(list, func(i, j int) bool {
+ lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
+ return strings.Compare(lhs, rhs) < 0
+ })
return list
}
-func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
- cliPrm := &client.PrmContainerListStream{
- XHeaders: prm.XHeaders,
- OwnerID: prm.OwnerID,
- Session: prm.Session,
- }
- rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
- if err != nil {
- return fmt.Errorf("init container list: %w", err)
- }
-
- err = rdr.Iterate(processCnr)
- if err != nil {
- return fmt.Errorf("read container list: %w", err)
- }
-
- return
-}
-
// PutContainerPrm groups parameters of PutContainer operation.
type PutContainerPrm struct {
Client *client.Client
@@ -205,6 +187,54 @@ func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteCon
return
}
+// EACLPrm groups parameters of EACL operation.
+type EACLPrm struct {
+ Client *client.Client
+ ClientParams client.PrmContainerEACL
+}
+
+// EACLRes groups the resulting values of EACL operation.
+type EACLRes struct {
+ cliRes *client.ResContainerEACL
+}
+
+// EACL returns requested eACL table.
+func (x EACLRes) EACL() eacl.Table {
+ return x.cliRes.Table()
+}
+
+// EACL reads eACL table from FrostFS by container ID.
+//
+// Returns any error which prevented the operation from completing correctly in error return.
+func EACL(ctx context.Context, prm EACLPrm) (res EACLRes, err error) {
+ res.cliRes, err = prm.Client.ContainerEACL(ctx, prm.ClientParams)
+
+ return
+}
+
+// SetEACLPrm groups parameters of SetEACL operation.
+type SetEACLPrm struct {
+ Client *client.Client
+ ClientParams client.PrmContainerSetEACL
+}
+
+// SetEACLRes groups the resulting values of SetEACL operation.
+type SetEACLRes struct{}
+
+// SetEACL requests to save an eACL table in FrostFS.
+//
+// Operation is asynchronous and not guaranteed even in the absence of errors.
+// The required time is also not predictable.
+//
+// Success can be verified by reading by container identifier.
+//
+// Returns any error which prevented the operation from completing correctly in error return.
+func SetEACL(ctx context.Context, prm SetEACLPrm) (res SetEACLRes, err error) {
+ _, err = prm.Client.ContainerSetEACL(ctx, prm.ClientParams)
+
+ return
+}
+
// NetworkInfoPrm groups parameters of NetworkInfo operation.
type NetworkInfoPrm struct {
Client *client.Client
@@ -323,7 +353,7 @@ type PutObjectPrm struct {
rdr io.Reader
- headerCallback func()
+ headerCallback func(*objectSDK.Object)
prepareLocally bool
}
@@ -340,7 +370,7 @@ func (x *PutObjectPrm) SetPayloadReader(rdr io.Reader) {
// SetHeaderCallback sets callback which is called on the object after the header is received
// but before the payload is written.
-func (x *PutObjectPrm) SetHeaderCallback(f func()) {
+func (x *PutObjectPrm) SetHeaderCallback(f func(*objectSDK.Object)) {
x.headerCallback = f
}
@@ -357,23 +387,30 @@ func (x *PutObjectPrm) PrepareLocally() {
}
func (x *PutObjectPrm) convertToSDKPrm(ctx context.Context) (client.PrmObjectPutInit, error) {
- putPrm := client.PrmObjectPutInit{
- XHeaders: x.xHeaders,
- BearerToken: x.bearerToken,
- Local: x.local,
- CopiesNumber: x.copyNum,
+ var putPrm client.PrmObjectPutInit
+ if !x.prepareLocally && x.sessionToken != nil {
+ putPrm.WithinSession(*x.sessionToken)
}
+ if x.bearerToken != nil {
+ putPrm.WithBearerToken(*x.bearerToken)
+ }
+
+ if x.local {
+ putPrm.MarkLocal()
+ }
+
+ putPrm.WithXHeaders(x.xHeaders...)
+ putPrm.SetCopiesNumberByVectors(x.copyNum)
+
if x.prepareLocally {
res, err := x.cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
if err != nil {
return client.PrmObjectPutInit{}, err
}
- putPrm.MaxSize = res.Info().MaxObjectSize()
- putPrm.EpochSource = epochSource(res.Info().CurrentEpoch())
- putPrm.WithoutHomomorphHash = res.Info().HomomorphicHashingDisabled()
- } else {
- putPrm.Session = x.sessionToken
+ putPrm.WithObjectMaxSize(res.Info().MaxObjectSize())
+ putPrm.WithEpochSource(epochSource(res.Info().CurrentEpoch()))
+ putPrm.WithoutHomomorphicHash(res.Info().HomomorphicHashingDisabled())
}
return putPrm, nil
}
@@ -409,7 +446,7 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
if wrt.WriteHeader(ctx, *prm.hdr) {
if prm.headerCallback != nil {
- prm.headerCallback()
+ prm.headerCallback(prm.hdr)
}
sz := prm.hdr.PayloadSize()
@@ -579,6 +616,13 @@ type HeadObjectPrm struct {
commonObjectPrm
objectAddressPrm
rawPrm
+
+ mainOnly bool
+}
+
+// SetMainOnlyFlag sets flag to get only main fields of an object header in terms of FrostFS API.
+func (x *HeadObjectPrm) SetMainOnlyFlag(v bool) {
+ x.mainOnly = v
}
// HeadObjectRes groups the resulting values of HeadObject operation.
@@ -617,7 +661,7 @@ func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error)
var hdr objectSDK.Object
if !res.ReadHeader(&hdr) {
- return nil, errMissingHeaderInResponse
+ return nil, fmt.Errorf("missing header in response")
}
return &HeadObjectRes{
@@ -652,15 +696,24 @@ func (x SearchObjectsRes) IDList() []oid.ID {
//
// Returns any error which prevented the operation from completing correctly in error return.
func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes, error) {
- cliPrm := client.PrmObjectSearch{
- XHeaders: prm.xHeaders,
- Local: prm.local,
- BearerToken: prm.bearerToken,
- Session: prm.sessionToken,
- ContainerID: &prm.cnrID,
- Filters: prm.filters,
+ var cliPrm client.PrmObjectSearch
+ cliPrm.InContainer(prm.cnrID)
+ cliPrm.SetFilters(prm.filters)
+
+ if prm.sessionToken != nil {
+ cliPrm.WithinSession(*prm.sessionToken)
}
+ if prm.bearerToken != nil {
+ cliPrm.WithBearerToken(*prm.bearerToken)
+ }
+
+ if prm.local {
+ cliPrm.MarkLocal()
+ }
+
+ cliPrm.WithXHeaders(prm.xHeaders...)
+
rdr, err := prm.cli.ObjectSearchInit(ctx, cliPrm)
if err != nil {
return nil, fmt.Errorf("init object search: %w", err)
@@ -673,7 +726,9 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
for {
n, ok = rdr.Read(buf)
- list = append(list, buf[:n]...)
+ for i := 0; i < n; i++ {
+ list = append(list, buf[i])
+ }
if !ok {
break
}
@@ -684,7 +739,10 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
return nil, fmt.Errorf("read object list: %w", err)
}
- slices.SortFunc(list, oid.ID.Cmp)
+ sort.Slice(list, func(i, j int) bool {
+ lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
+ return strings.Compare(lhs, rhs) < 0
+ })
return &SearchObjectsRes{
ids: list,
@@ -848,71 +906,3 @@ func SyncContainerSettings(ctx context.Context, prm SyncContainerPrm) (*SyncCont
return new(SyncContainerRes), nil
}
-
-// PatchObjectPrm groups parameters of PatchObject operation.
-type PatchObjectPrm struct {
- commonObjectPrm
- objectAddressPrm
-
- NewAttributes []objectSDK.Attribute
-
- ReplaceAttribute bool
-
- NewSplitHeader *objectSDK.SplitHeader
-
- PayloadPatches []PayloadPatch
-}
-
-type PayloadPatch struct {
- Range objectSDK.Range
-
- PayloadPath string
-}
-
-type PatchRes struct {
- OID oid.ID
-}
-
-func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
- patchPrm := client.PrmObjectPatch{
- XHeaders: prm.xHeaders,
- BearerToken: prm.bearerToken,
- Session: prm.sessionToken,
- Address: prm.objAddr,
- }
-
- slices.SortFunc(prm.PayloadPatches, func(a, b PayloadPatch) int {
- return cmp.Compare(a.Range.GetOffset(), b.Range.GetOffset())
- })
-
- patcher, err := prm.cli.ObjectPatchInit(ctx, patchPrm)
- if err != nil {
- return nil, fmt.Errorf("init payload reading: %w", err)
- }
-
- if patcher.PatchHeader(ctx, client.PatchHeaderPrm{
- NewSplitHeader: prm.NewSplitHeader,
- NewAttributes: prm.NewAttributes,
- ReplaceAttributes: prm.ReplaceAttribute,
- }) {
- for _, pp := range prm.PayloadPatches {
- payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
- if err != nil {
- return nil, err
- }
- applied := patcher.PatchPayload(ctx, &pp.Range, payloadFile)
- _ = payloadFile.Close()
- if !applied {
- break
- }
- }
- }
-
- res, err := patcher.Close(ctx)
- if err != nil {
- return nil, err
- }
- return &PatchRes{
- OID: res.ObjectID(),
- }, nil
-}
diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go
index 1eadfa2e1..79d3dcb0d 100644
--- a/cmd/frostfs-cli/internal/client/sdk.go
+++ b/cmd/frostfs-cli/internal/client/sdk.go
@@ -34,10 +34,6 @@ func GetSDKClientByFlag(cmd *cobra.Command, key *ecdsa.PrivateKey, endpointFlag
func getSDKClientByFlag(cmd *cobra.Command, key *ecdsa.PrivateKey, endpointFlag string) (*client.Client, error) {
var addr network.Address
- if len(viper.GetString(endpointFlag)) == 0 {
- return nil, fmt.Errorf("%s is not defined", endpointFlag)
- }
-
err := addr.FromString(viper.GetString(endpointFlag))
if err != nil {
return nil, fmt.Errorf("%v: %w", errInvalidEndpoint, err)
@@ -47,29 +43,27 @@ func getSDKClientByFlag(cmd *cobra.Command, key *ecdsa.PrivateKey, endpointFlag
// GetSDKClient returns default frostfs-sdk-go client.
func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey, addr network.Address) (*client.Client, error) {
- var c client.Client
+ var (
+ c client.Client
+ prmInit client.PrmInit
+ prmDial client.PrmDial
+ )
- prmInit := client.PrmInit{
- Key: *key,
- }
-
- prmDial := client.PrmDial{
- Endpoint: addr.URIAddr(),
- GRPCDialOptions: []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
- grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
- grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
- },
- }
+ prmInit.SetDefaultPrivateKey(*key)
+ prmInit.ResolveFrostFSFailures()
+ prmDial.SetServerURI(addr.URIAddr())
if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
// In CLI we can only set a timeout for the whole operation.
// By also setting stream timeout we ensure that no operation hands
// for too long.
- prmDial.DialTimeout = timeout
- prmDial.StreamTimeout = timeout
+ prmDial.SetTimeout(timeout)
+ prmDial.SetStreamTimeout(timeout)
common.PrintVerbose(cmd, "Set request timeout to %s.", timeout)
}
+ prmDial.SetGRPCDialOptions(
+ grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
+ grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()))
c.Init(prmInit)
diff --git a/cmd/frostfs-cli/internal/common/tracing.go b/cmd/frostfs-cli/internal/common/tracing.go
index 10863ed1e..30c2f2b1a 100644
--- a/cmd/frostfs-cli/internal/common/tracing.go
+++ b/cmd/frostfs-cli/internal/common/tracing.go
@@ -2,7 +2,7 @@ package common
import (
"context"
- "slices"
+ "sort"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
@@ -45,11 +45,15 @@ func StartClientCommandSpan(cmd *cobra.Command) {
})
commonCmd.ExitOnErr(cmd, "init tracing: %w", err)
- var components []string
+ var components sort.StringSlice
for c := cmd; c != nil; c = c.Parent() {
components = append(components, c.Name())
}
- slices.Reverse(components)
+ for i, j := 0, len(components)-1; i < j; {
+ components.Swap(i, j)
+ i++
+ j--
+ }
operation := strings.Join(components, ".")
ctx, span := tracing.StartSpanFromContext(cmd.Context(), operation)
diff --git a/cmd/frostfs-cli/internal/commonflags/api.go b/cmd/frostfs-cli/internal/commonflags/api.go
index 6ed21e107..88321176f 100644
--- a/cmd/frostfs-cli/internal/commonflags/api.go
+++ b/cmd/frostfs-cli/internal/commonflags/api.go
@@ -9,7 +9,7 @@ const (
TTL = "ttl"
TTLShorthand = ""
TTLDefault = 2
- TTLUsage = "The maximum number of intermediate nodes in the request route"
+ TTLUsage = "TTL value in request meta header"
XHeadersKey = "xhdr"
XHeadersShorthand = "x"
diff --git a/cmd/frostfs-cli/internal/commonflags/flags.go b/cmd/frostfs-cli/internal/commonflags/flags.go
index fad1f6183..5049dc3b1 100644
--- a/cmd/frostfs-cli/internal/commonflags/flags.go
+++ b/cmd/frostfs-cli/internal/commonflags/flags.go
@@ -11,9 +11,9 @@ import (
// values and their usage descriptions.
const (
GenerateKey = "generate-key"
- GenerateKeyShorthand = "g"
- GenerateKeyDefault = false
- GenerateKeyUsage = "Generate new private key"
+ generateKeyShorthand = "g"
+ generateKeyDefault = false
+ generateKeyUsage = "Generate new private key"
WalletPath = "wallet"
WalletPathShorthand = "w"
@@ -28,7 +28,7 @@ const (
RPC = "rpc-endpoint"
RPCShorthand = "r"
RPCDefault = ""
- RPCUsage = "Remote node address (':' or 'grpcs://:')"
+ RPCUsage = "Remote node address (as 'multiaddr' or ':')"
Timeout = "timeout"
TimeoutShorthand = "t"
@@ -50,13 +50,6 @@ const (
TracingFlag = "trace"
TracingFlagUsage = "Generate trace ID and print it."
-
- AwaitFlag = "await"
- AwaitFlagUsage = "Wait for the operation to complete"
-
- QuietFlag = "quiet"
- QuietFlagShorthand = "q"
- QuietFlagUsage = "Print nothing and exit with non-zero code on failure"
)
// Init adds common flags to the command:
@@ -79,7 +72,7 @@ func Init(cmd *cobra.Command) {
func InitWithoutRPC(cmd *cobra.Command) {
ff := cmd.Flags()
- ff.BoolP(GenerateKey, GenerateKeyShorthand, GenerateKeyDefault, GenerateKeyUsage)
+ ff.BoolP(GenerateKey, generateKeyShorthand, generateKeyDefault, generateKeyUsage)
ff.StringP(WalletPath, WalletPathShorthand, WalletPathDefault, WalletPathUsage)
ff.StringP(Account, AccountShorthand, AccountDefault, AccountUsage)
}
diff --git a/cmd/frostfs-cli/internal/key/key_test.go b/cmd/frostfs-cli/internal/key/key_test.go
index 37e4fd4ee..e3127a3fe 100644
--- a/cmd/frostfs-cli/internal/key/key_test.go
+++ b/cmd/frostfs-cli/internal/key/key_test.go
@@ -24,8 +24,6 @@ var testCmd = &cobra.Command{
}
func Test_getOrGenerate(t *testing.T) {
- t.Cleanup(viper.Reset)
-
dir := t.TempDir()
wallPath := filepath.Join(dir, "wallet.json")
diff --git a/cmd/frostfs-cli/modules/accounting/balance.go b/cmd/frostfs-cli/modules/accounting/balance.go
index 1364b5e8e..5ed8f9403 100644
--- a/cmd/frostfs-cli/modules/accounting/balance.go
+++ b/cmd/frostfs-cli/modules/accounting/balance.go
@@ -23,7 +23,7 @@ var accountingBalanceCmd = &cobra.Command{
Use: "balance",
Short: "Get internal balance of FrostFS account",
Long: `Get internal balance of FrostFS account`,
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
var idUser user.ID
pk := key.GetOrGenerate(cmd)
@@ -39,7 +39,7 @@ var accountingBalanceCmd = &cobra.Command{
var prm internalclient.BalanceOfPrm
prm.SetClient(cli)
- prm.Account = idUser
+ prm.SetAccount(idUser)
res, err := internalclient.BalanceOf(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
diff --git a/cmd/frostfs-cli/modules/accounting/root.go b/cmd/frostfs-cli/modules/accounting/root.go
index afeca7626..f94488b6f 100644
--- a/cmd/frostfs-cli/modules/accounting/root.go
+++ b/cmd/frostfs-cli/modules/accounting/root.go
@@ -1,6 +1,7 @@
package accounting
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@@ -11,13 +12,15 @@ var Cmd = &cobra.Command{
Use: "accounting",
Short: "Operations with accounts and balances",
Long: `Operations with accounts and balances`,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
flags := cmd.Flags()
_ = viper.BindPFlag(commonflags.WalletPath, flags.Lookup(commonflags.WalletPath))
_ = viper.BindPFlag(commonflags.Account, flags.Lookup(commonflags.Account))
_ = viper.BindPFlag(commonflags.RPC, flags.Lookup(commonflags.RPC))
+ common.StartClientCommandSpan(cmd)
},
+ PersistentPostRun: common.StopClientCommandSpan,
}
func init() {
diff --git a/cmd/frostfs-cli/modules/acl/extended/create.go b/cmd/frostfs-cli/modules/acl/extended/create.go
index 59dfabba2..7da26f8eb 100644
--- a/cmd/frostfs-cli/modules/acl/extended/create.go
+++ b/cmd/frostfs-cli/modules/acl/extended/create.go
@@ -106,7 +106,7 @@ func createEACL(cmd *cobra.Command, _ []string) {
return
}
- err = os.WriteFile(outArg, buf.Bytes(), 0o644)
+ err = os.WriteFile(outArg, buf.Bytes(), 0644)
if err != nil {
cmd.PrintErrln(err)
os.Exit(1)
diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go
deleted file mode 100644
index f4039283f..000000000
--- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package apemanager
-
-import (
- "fmt"
-
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
- client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "github.com/spf13/cobra"
-)
-
-var addCmd = &cobra.Command{
- Use: "add",
- Short: "Add rule chain for a target",
- Run: add,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
- commonflags.Bind(cmd)
- },
-}
-
-func parseTarget(cmd *cobra.Command) (ct apeSDK.ChainTarget) {
- t := apeCmd.ParseTarget(cmd)
-
- ct.Name = t.Name
-
- switch t.Type {
- case engine.Namespace:
- ct.TargetType = apeSDK.TargetTypeNamespace
- case engine.Container:
- ct.TargetType = apeSDK.TargetTypeContainer
- case engine.User:
- ct.TargetType = apeSDK.TargetTypeUser
- case engine.Group:
- ct.TargetType = apeSDK.TargetTypeGroup
- default:
- commonCmd.ExitOnErr(cmd, "conversion error: %w", fmt.Errorf("unknown type '%c'", t.Type))
- }
- return ct
-}
-
-func parseChain(cmd *cobra.Command) apeSDK.Chain {
- c := apeCmd.ParseChain(cmd)
- serialized := c.Bytes()
- return apeSDK.Chain{
- Raw: serialized,
- }
-}
-
-func add(cmd *cobra.Command, _ []string) {
- c := parseChain(cmd)
-
- target := parseTarget(cmd)
-
- key := key.Get(cmd)
- cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC)
-
- res, err := cli.APEManagerAddChain(cmd.Context(), client_sdk.PrmAPEManagerAddChain{
- ChainTarget: target,
- Chain: c,
- })
-
- commonCmd.ExitOnErr(cmd, "add chain error: %w", err)
-
- cmd.Println("Rule has been added.")
- cmd.Println("Chain ID: ", string(res.ChainID))
-}
-
-func initAddCmd() {
- commonflags.Init(addCmd)
-
- ff := addCmd.Flags()
- ff.StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc)
- ff.String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc)
- ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
- ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = addCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc)
-
- addCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag)
-}
diff --git a/cmd/frostfs-cli/modules/ape_manager/list_chain.go b/cmd/frostfs-cli/modules/ape_manager/list_chain.go
deleted file mode 100644
index b07ecc52f..000000000
--- a/cmd/frostfs-cli/modules/ape_manager/list_chain.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package apemanager
-
-import (
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "github.com/spf13/cobra"
-)
-
-var listCmd = &cobra.Command{
- Use: "list",
- Short: "List rule chains defined on target",
- Run: list,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
- commonflags.Bind(cmd)
- },
-}
-
-func list(cmd *cobra.Command, _ []string) {
- target := parseTarget(cmd)
-
- key := key.Get(cmd)
- cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC)
-
- resp, err := cli.APEManagerListChains(cmd.Context(),
- client_sdk.PrmAPEManagerListChains{
- ChainTarget: target,
- })
- commonCmd.ExitOnErr(cmd, "list chains call error: %w", err)
-
- for _, respChain := range resp.Chains {
- var chain apechain.Chain
- commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(respChain.Raw))
- apeCmd.PrintHumanReadableAPEChain(cmd, &chain)
- }
-}
-
-func initListCmd() {
- commonflags.Init(listCmd)
-
- ff := listCmd.Flags()
- ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = listCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
-}
diff --git a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go b/cmd/frostfs-cli/modules/ape_manager/remove_chain.go
deleted file mode 100644
index 136ca81c3..000000000
--- a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package apemanager
-
-import (
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "github.com/spf13/cobra"
-)
-
-var removeCmd = &cobra.Command{
- Use: "remove",
- Short: "Remove rule chain for a target",
- Run: remove,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
- commonflags.Bind(cmd)
- },
-}
-
-func remove(cmd *cobra.Command, _ []string) {
- target := parseTarget(cmd)
-
- key := key.Get(cmd)
- cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC)
-
- chainID := apeCmd.ParseChainID(cmd)
- chainIDRaw := []byte(chainID)
-
- _, err := cli.APEManagerRemoveChain(cmd.Context(), client_sdk.PrmAPEManagerRemoveChain{
- ChainTarget: target,
- ChainID: chainIDRaw,
- })
-
- commonCmd.ExitOnErr(cmd, "remove chain error: %w", err)
-
- cmd.Println("\nRule has been removed.")
-}
-
-func initRemoveCmd() {
- commonflags.Init(removeCmd)
-
- ff := removeCmd.Flags()
- ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = removeCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc)
- _ = removeCmd.MarkFlagRequired(apeCmd.ChainIDFlag)
- ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc)
-}
diff --git a/cmd/frostfs-cli/modules/ape_manager/root.go b/cmd/frostfs-cli/modules/ape_manager/root.go
deleted file mode 100644
index 7b4f92921..000000000
--- a/cmd/frostfs-cli/modules/ape_manager/root.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package apemanager
-
-import (
- "github.com/spf13/cobra"
-)
-
-var Cmd = &cobra.Command{
- Use: "ape-manager",
- Short: "Operations with APE manager",
- Long: `Operations with APE manager`,
-}
-
-func init() {
- Cmd.AddCommand(addCmd)
- Cmd.AddCommand(removeCmd)
- Cmd.AddCommand(listCmd)
-
- initAddCmd()
- initRemoveCmd()
- initListCmd()
-}
diff --git a/cmd/frostfs-cli/modules/bearer/create.go b/cmd/frostfs-cli/modules/bearer/create.go
index 0927788ba..b85115047 100644
--- a/cmd/frostfs-cli/modules/bearer/create.go
+++ b/cmd/frostfs-cli/modules/bearer/create.go
@@ -15,12 +15,10 @@ import (
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
- "github.com/spf13/viper"
)
const (
eaclFlag = "eacl"
- apeFlag = "ape"
issuedAtFlag = "issued-at"
notValidBeforeFlag = "not-valid-before"
ownerFlag = "owner"
@@ -39,18 +37,10 @@ In this case --` + commonflags.RPC + ` flag should be specified and the epoch in
is set to current epoch + n.
`,
Run: createToken,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
- ff := cmd.Flags()
-
- _ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath))
- _ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account))
- _ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC))
- },
}
func init() {
- createCmd.Flags().StringP(eaclFlag, "e", "", "Path to the extended ACL table (mutually exclusive with --impersonate and --ape flag)")
- createCmd.Flags().StringP(apeFlag, "a", "", "Path to the JSON-encoded APE override (mutually exclusive with --impersonate and --eacl flag)")
+ createCmd.Flags().StringP(eaclFlag, "e", "", "Path to the extended ACL table (mutually exclusive with --impersonate flag)")
createCmd.Flags().StringP(issuedAtFlag, "i", "+0", "Epoch to issue token at")
createCmd.Flags().StringP(notValidBeforeFlag, "n", "+0", "Not valid before epoch")
createCmd.Flags().StringP(commonflags.ExpireAt, "x", "", "The last active epoch for the token")
@@ -59,15 +49,13 @@ func init() {
createCmd.Flags().Bool(jsonFlag, false, "Output token in JSON")
createCmd.Flags().Bool(impersonateFlag, false, "Mark token as impersonate to consider the token signer as the request owner (mutually exclusive with --eacl flag)")
createCmd.Flags().StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage)
- createCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
- createCmd.Flags().StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage)
- createCmd.MarkFlagsMutuallyExclusive(eaclFlag, apeFlag, impersonateFlag)
+ createCmd.MarkFlagsMutuallyExclusive(eaclFlag, impersonateFlag)
_ = cobra.MarkFlagFilename(createCmd.Flags(), eaclFlag)
- _ = cobra.MarkFlagFilename(createCmd.Flags(), apeFlag)
_ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.ExpireAt)
+ _ = cobra.MarkFlagRequired(createCmd.Flags(), ownerFlag)
_ = cobra.MarkFlagRequired(createCmd.Flags(), outFlag)
}
@@ -82,7 +70,7 @@ func createToken(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err)
if iatRelative || expRelative || nvbRelative {
- endpoint := viper.GetString(commonflags.RPC)
+ endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
if len(endpoint) == 0 {
commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC))
}
@@ -108,16 +96,16 @@ func createToken(cmd *cobra.Command, _ []string) {
fmt.Errorf("expiration epoch is less than not-valid-before epoch: %d < %d", exp, nvb))
}
+ ownerStr, _ := cmd.Flags().GetString(ownerFlag)
+
+ var ownerID user.ID
+ commonCmd.ExitOnErr(cmd, "can't parse recipient: %w", ownerID.DecodeString(ownerStr))
+
var b bearer.Token
b.SetExp(exp)
b.SetNbf(nvb)
b.SetIat(iat)
-
- if ownerStr, _ := cmd.Flags().GetString(ownerFlag); ownerStr != "" {
- var ownerID user.ID
- commonCmd.ExitOnErr(cmd, "can't parse recipient: %w", ownerID.DecodeString(ownerStr))
- b.ForUser(ownerID)
- }
+ b.ForUser(ownerID)
impersonate, _ := cmd.Flags().GetBool(impersonateFlag)
b.SetImpersonate(impersonate)
@@ -131,14 +119,6 @@ func createToken(cmd *cobra.Command, _ []string) {
b.SetEACLTable(*table)
}
- apePath, _ := cmd.Flags().GetString(apeFlag)
- if apePath != "" {
- var apeOverride bearer.APEOverride
- raw, err := os.ReadFile(apePath)
- commonCmd.ExitOnErr(cmd, "can't read APE rules: %w", err)
- commonCmd.ExitOnErr(cmd, "can't parse APE rules: %w", json.Unmarshal(raw, &apeOverride))
- b.SetAPEOverride(apeOverride)
- }
var data []byte
toJSON, _ := cmd.Flags().GetBool(jsonFlag)
@@ -150,6 +130,6 @@ func createToken(cmd *cobra.Command, _ []string) {
}
out, _ := cmd.Flags().GetString(outFlag)
- err = os.WriteFile(out, data, 0o644)
+ err = os.WriteFile(out, data, 0644)
commonCmd.ExitOnErr(cmd, "can't write token to file: %w", err)
}
diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go
deleted file mode 100644
index 9632061f1..000000000
--- a/cmd/frostfs-cli/modules/bearer/generate_override.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package bearer
-
-import (
- "fmt"
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "github.com/spf13/cobra"
-)
-
-const (
- outputFlag = "output"
-)
-
-var generateAPEOverrideCmd = &cobra.Command{
- Use: "generate-ape-override",
- Short: "Generate APE override.",
- Long: `Generate APE override by target and APE chains. Util command.
-
-Generated APE override can be dumped to a file in JSON format that is passed to
-"create" command.
-`,
- Run: genereateAPEOverride,
-}
-
-func genereateAPEOverride(cmd *cobra.Command, _ []string) {
- c := apeCmd.ParseChain(cmd)
-
- targetCID, _ := cmd.Flags().GetString(commonflags.CIDFlag)
- var cid cidSDK.ID
- commonCmd.ExitOnErr(cmd, "invalid cid format: %w", cid.DecodeString(targetCID))
-
- override := &bearer.APEOverride{
- Target: apeSDK.ChainTarget{
- TargetType: apeSDK.TargetTypeContainer,
- Name: targetCID,
- },
- Chains: []apeSDK.Chain{
- {
- Raw: c.Bytes(),
- },
- },
- }
-
- overrideMarshalled, err := override.MarshalJSON()
- commonCmd.ExitOnErr(cmd, "failed to marshal APE override: %w", err)
-
- outputPath, _ := cmd.Flags().GetString(outputFlag)
- if outputPath != "" {
- err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
- commonCmd.ExitOnErr(cmd, "dump error: %w", err)
- } else {
- fmt.Print("\n")
- fmt.Println(string(overrideMarshalled))
- }
-}
-
-func init() {
- ff := generateAPEOverrideCmd.Flags()
-
- ff.StringP(commonflags.CIDFlag, "", "", "Target container ID.")
- _ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.CIDFlag)
-
- ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement")
- ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format")
- ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain")
- ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex")
-
- ff.String(outputFlag, "", "Output path to dump result JSON-encoded APE override")
- _ = cobra.MarkFlagFilename(createCmd.Flags(), outputFlag)
-}
diff --git a/cmd/frostfs-cli/modules/bearer/root.go b/cmd/frostfs-cli/modules/bearer/root.go
index fa6aef6fb..200d096ac 100644
--- a/cmd/frostfs-cli/modules/bearer/root.go
+++ b/cmd/frostfs-cli/modules/bearer/root.go
@@ -11,5 +11,4 @@ var Cmd = &cobra.Command{
func init() {
Cmd.AddCommand(createCmd)
- Cmd.AddCommand(generateAPEOverrideCmd)
}
diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go
index 30f995180..37827b617 100644
--- a/cmd/frostfs-cli/modules/container/create.go
+++ b/cmd/frostfs-cli/modules/container/create.go
@@ -7,20 +7,22 @@ import (
"strings"
"time"
+ containerApi "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- containerApi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
)
var (
+ containerACL string
containerPolicy string
containerAttributes []string
containerAwait bool
@@ -36,7 +38,7 @@ var createContainerCmd = &cobra.Command{
Short: "Create new container",
Long: `Create new container and register it in the FrostFS.
It will be stored in sidechain when inner ring will accepts it.`,
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
placementPolicy, err := parseContainerPolicy(cmd, containerPolicy)
commonCmd.ExitOnErr(cmd, "", err)
@@ -56,27 +58,13 @@ It will be stored in sidechain when inner ring will accepts it.`,
"use --force option to skip this check: %w", err)
for i, nodes := range nodesByRep {
- if repNum := placementPolicy.ReplicaDescriptor(i).NumberOfObjects(); repNum > 0 {
- if repNum > uint32(len(nodes)) {
- commonCmd.ExitOnErr(cmd, "", fmt.Errorf(
- "the number of nodes '%d' in selector is not enough for the number of replicas '%d', "+
- "use --force option to skip this check",
- len(nodes),
- repNum,
- ))
- }
- } else if ecParts := placementPolicy.ReplicaDescriptor(i).TotalECPartCount(); ecParts > 0 {
- if ecParts > uint32(len(nodes)) {
- commonCmd.ExitOnErr(cmd, "", fmt.Errorf(
- "the number of nodes '%d' in selector is not enough for EC placement '%d.%d', "+
- "use --force option to skip this check",
- len(nodes),
- placementPolicy.ReplicaDescriptor(i).GetECDataCount(),
- placementPolicy.ReplicaDescriptor(i).GetECParityCount(),
- ))
- }
- } else {
- commonCmd.ExitOnErr(cmd, "%w", errors.New("no replication policy is set"))
+ if placementPolicy.ReplicaNumberByIndex(i) > uint32(len(nodes)) {
+ commonCmd.ExitOnErr(cmd, "", fmt.Errorf(
+ "the number of nodes '%d' in selector is not enough for the number of replicas '%d', "+
+ "use --force option to skip this check",
+ len(nodes),
+ placementPolicy.ReplicaNumberByIndex(i),
+ ))
}
}
}
@@ -87,6 +75,9 @@ It will be stored in sidechain when inner ring will accepts it.`,
err = parseAttributes(&cnr, containerAttributes)
commonCmd.ExitOnErr(cmd, "", err)
+ var basicACL acl.Basic
+ commonCmd.ExitOnErr(cmd, "decode basic ACL string: %w", basicACL.DecodeString(containerACL))
+
tok := getSession(cmd)
if tok != nil {
@@ -100,6 +91,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
}
cnr.SetPlacementPolicy(*placementPolicy)
+ cnr.SetBasicACL(basicACL)
var syncContainerPrm internalclient.SyncContainerPrm
syncContainerPrm.SetClient(cli)
@@ -121,7 +113,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
id := res.ID()
- cmd.Println("CID:", id)
+ cmd.Println("container ID:", id)
if containerAwait {
cmd.Println("awaiting...")
@@ -133,7 +125,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
},
}
- for range awaitTimeout {
+ for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
@@ -157,6 +149,10 @@ func initContainerCreateCmd() {
flags.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage)
flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage)
+
+ flags.StringVar(&containerACL, "basic-acl", acl.NamePrivate, fmt.Sprintf("HEX encoded basic ACL value or keywords like '%s', '%s', '%s'",
+ acl.NamePublicRW, acl.NamePrivate, acl.NamePublicROExtended,
+ ))
flags.StringVarP(&containerPolicy, "policy", "p", "", "QL-encoded or JSON-encoded placement policy or path to file with it")
flags.StringSliceVarP(&containerAttributes, "attributes", "a", nil, "Comma separated pairs of container attributes in form of Key1=Value1,Key2=Value2")
flags.BoolVar(&containerAwait, "await", false, "Block execution until container is persisted")
diff --git a/cmd/frostfs-cli/modules/container/delete.go b/cmd/frostfs-cli/modules/container/delete.go
index c20188884..20de84f74 100644
--- a/cmd/frostfs-cli/modules/container/delete.go
+++ b/cmd/frostfs-cli/modules/container/delete.go
@@ -20,7 +20,7 @@ var deleteContainerCmd = &cobra.Command{
Short: "Delete existing container",
Long: `Delete existing container.
Only owner of the container has a permission to remove container.`,
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
id := parseContainerID(cmd)
tok := getSession(cmd)
@@ -110,7 +110,7 @@ Only owner of the container has a permission to remove container.`,
},
}
- for range awaitTimeout {
+ for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
diff --git a/cmd/frostfs-cli/modules/container/get.go b/cmd/frostfs-cli/modules/container/get.go
index fac6eb2cd..f0590c7a1 100644
--- a/cmd/frostfs-cli/modules/container/get.go
+++ b/cmd/frostfs-cli/modules/container/get.go
@@ -33,7 +33,7 @@ var getContainerInfoCmd = &cobra.Command{
Use: "get",
Short: "Get container field info",
Long: `Get container field info`,
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
cnr, _ := getContainer(cmd)
prettyPrintContainer(cmd, cnr, containerJSON)
@@ -51,7 +51,7 @@ var getContainerInfoCmd = &cobra.Command{
data = cnr.Marshal()
}
- err = os.WriteFile(containerPathTo, data, 0o644)
+ err = os.WriteFile(containerPathTo, data, 0644)
commonCmd.ExitOnErr(cmd, "can't write container to file: %w", err)
}
},
@@ -83,7 +83,7 @@ func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncod
var id cid.ID
container.CalculateID(&id, cnr)
- cmd.Println("CID:", id)
+ cmd.Println("container ID:", id)
cmd.Println("owner ID:", cnr.Owner())
@@ -93,9 +93,9 @@ func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncod
cmd.Println("created:", container.CreatedAt(cnr))
cmd.Println("attributes:")
- for key, val := range cnr.Attributes() {
+ cnr.IterateAttributes(func(key, val string) {
cmd.Printf("\t%s=%s\n", key, val)
- }
+ })
cmd.Println("placement policy:")
commonCmd.ExitOnErr(cmd, "write policy: %w", cnr.PlacementPolicy().WriteStringTo((*stringWriter)(cmd)))
diff --git a/cmd/frostfs-cli/modules/container/get_eacl.go b/cmd/frostfs-cli/modules/container/get_eacl.go
new file mode 100644
index 000000000..6746132eb
--- /dev/null
+++ b/cmd/frostfs-cli/modules/container/get_eacl.go
@@ -0,0 +1,68 @@
+package container
+
+import (
+ "os"
+
+ internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ "github.com/spf13/cobra"
+)
+
+var getExtendedACLCmd = &cobra.Command{
+ Use: "get-eacl",
+ Short: "Get extended ACL table of container",
+ Long: `Get extended ACL table of container`,
+ Run: func(cmd *cobra.Command, args []string) {
+ id := parseContainerID(cmd)
+ pk := key.GetOrGenerate(cmd)
+ cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
+
+ eaclPrm := internalclient.EACLPrm{
+ Client: cli,
+ ClientParams: client.PrmContainerEACL{
+ ContainerID: &id,
+ },
+ }
+
+ res, err := internalclient.EACL(cmd.Context(), eaclPrm)
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ eaclTable := res.EACL()
+
+ if containerPathTo == "" {
+ cmd.Println("eACL: ")
+ common.PrettyPrintJSON(cmd, &eaclTable, "eACL")
+
+ return
+ }
+
+ var data []byte
+
+ if containerJSON {
+ data, err = eaclTable.MarshalJSON()
+ commonCmd.ExitOnErr(cmd, "can't encode to JSON: %w", err)
+ } else {
+ data, err = eaclTable.Marshal()
+ commonCmd.ExitOnErr(cmd, "can't encode to binary: %w", err)
+ }
+
+ cmd.Println("dumping data to file:", containerPathTo)
+
+ err = os.WriteFile(containerPathTo, data, 0644)
+ commonCmd.ExitOnErr(cmd, "could not write eACL to file: %w", err)
+ },
+}
+
+func initContainerGetEACLCmd() {
+ commonflags.Init(getExtendedACLCmd)
+
+ flags := getExtendedACLCmd.Flags()
+
+ flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
+ flags.StringVar(&containerPathTo, "to", "", "Path to dump encoded container (default: binary encoded)")
+ flags.BoolVar(&containerJSON, commonflags.JSON, false, "Encode EACL table in json format")
+}
diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go
index e4a023d91..1c7787760 100644
--- a/cmd/frostfs-cli/modules/container/list.go
+++ b/cmd/frostfs-cli/modules/container/list.go
@@ -1,16 +1,16 @@
package container
import (
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
)
// flags of list command.
@@ -18,8 +18,6 @@ const (
flagListPrintAttr = "with-attr"
flagListContainerOwner = "owner"
flagListName = "name"
-
- generateKeyContainerUsage = commonflags.GenerateKeyUsage + ", should be used with --owner flag"
)
// flag vars of list command.
@@ -33,14 +31,9 @@ var listContainersCmd = &cobra.Command{
Use: "list",
Short: "List all created containers",
Long: "List all created containers",
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
var idUser user.ID
- generateKey, _ := cmd.Flags().GetBool(commonflags.GenerateKey)
- if flagVarListContainerOwner == "" && generateKey {
- cmd.PrintErrln("WARN: using -g without --owner - output will be empty")
- }
-
key := key.GetOrGenerate(cmd)
if flagVarListContainerOwner == "" {
@@ -54,60 +47,49 @@ var listContainersCmd = &cobra.Command{
var prm internalclient.ListContainersPrm
prm.SetClient(cli)
- prm.OwnerID = idUser
+ prm.SetAccount(idUser)
+
+ res, err := internalclient.ListContainers(cmd.Context(), prm)
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
prmGet := internalclient.GetContainerPrm{
Client: cli,
}
- var containerIDs []cid.ID
-
- err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
- printContainer(cmd, prmGet, id)
- return false
- })
- if err == nil {
- return
- }
-
- if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
- res, err := internalclient.ListContainers(cmd.Context(), prm)
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
- containerIDs = res.SortedIDList()
- } else {
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
- }
+ containerIDs := res.SortedIDList()
for _, cnrID := range containerIDs {
- printContainer(cmd, prmGet, cnrID)
+ if flagVarListName == "" && !flagVarListPrintAttr {
+ cmd.Println(cnrID.String())
+ continue
+ }
+
+ cnrID := cnrID
+ prmGet.ClientParams.ContainerID = &cnrID
+ res, err := internalclient.GetContainer(cmd.Context(), prmGet)
+ if err != nil {
+ cmd.Printf(" failed to read attributes: %v\n", err)
+ continue
+ }
+
+ cnr := res.Container()
+ if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
+ continue
+ }
+ cmd.Println(cnrID.String())
+
+ if flagVarListPrintAttr {
+ cnr.IterateAttributes(func(key, val string) {
+ if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
+ // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
+ // Use dedicated method to skip system attributes.
+ cmd.Printf(" %s: %s\n", key, val)
+ }
+ })
+ }
}
},
}
-func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
- if flagVarListName == "" && !flagVarListPrintAttr {
- cmd.Println(id.String())
- return
- }
-
- prmGet.ClientParams.ContainerID = &id
- res, err := internalclient.GetContainer(cmd.Context(), prmGet)
- if err != nil {
- cmd.Printf(" failed to read attributes: %v\n", err)
- return
- }
-
- cnr := res.Container()
- if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
- return
- }
- cmd.Println(id.String())
-
- if flagVarListPrintAttr {
- for key, val := range cnr.Attributes() {
- cmd.Printf(" %s: %s\n", key, val)
- }
- }
-}
-
func initContainerListContainersCmd() {
commonflags.Init(listContainersCmd)
@@ -122,5 +104,4 @@ func initContainerListContainersCmd() {
flags.BoolVar(&flagVarListPrintAttr, flagListPrintAttr, false,
"Request and print attributes of each container",
)
- flags.Lookup(commonflags.GenerateKey).Usage = generateKeyContainerUsage
}
diff --git a/cmd/frostfs-cli/modules/container/list_objects.go b/cmd/frostfs-cli/modules/container/list_objects.go
index d5850359d..1e0aeb4db 100644
--- a/cmd/frostfs-cli/modules/container/list_objects.go
+++ b/cmd/frostfs-cli/modules/container/list_objects.go
@@ -1,6 +1,9 @@
package container
import (
+ "strings"
+
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
@@ -25,7 +28,7 @@ var listContainerObjectsCmd = &cobra.Command{
Use: "list-objects",
Short: "List existing objects in container",
Long: `List existing objects in container`,
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
id := parseContainerID(cmd)
filters := new(objectSDK.SearchFilters)
@@ -64,8 +67,14 @@ var listContainerObjectsCmd = &cobra.Command{
resHead, err := internalclient.HeadObject(cmd.Context(), prmHead)
if err == nil {
- for _, attr := range resHead.Header().UserAttributes() {
- cmd.Printf(" %s: %s\n", attr.Key(), attr.Value())
+ attrs := resHead.Header().Attributes()
+ for i := range attrs {
+ attrKey := attrs[i].Key()
+ if !strings.HasPrefix(attrKey, v2object.SysAttributePrefix) && !strings.HasPrefix(attrKey, v2object.SysAttributePrefixNeoFS) {
+ // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
+ // Use dedicated method to skip system attributes.
+ cmd.Printf(" %s: %s\n", attrKey, attrs[i].Value())
+ }
}
} else {
cmd.Printf(" failed to read attributes: %v\n", err)
diff --git a/cmd/frostfs-cli/modules/container/nodes.go b/cmd/frostfs-cli/modules/container/nodes.go
index 1ae8ab604..8b0f266a7 100644
--- a/cmd/frostfs-cli/modules/container/nodes.go
+++ b/cmd/frostfs-cli/modules/container/nodes.go
@@ -2,7 +2,6 @@ package container
import (
"crypto/sha256"
- "errors"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
@@ -20,8 +19,8 @@ var containerNodesCmd = &cobra.Command{
Use: "nodes",
Short: "Show nodes for container",
Long: "Show nodes taking part in a container at the current epoch.",
- Run: func(cmd *cobra.Command, _ []string) {
- cnr, pkey := getContainer(cmd)
+ Run: func(cmd *cobra.Command, args []string) {
+ var cnr, pkey = getContainer(cmd)
if pkey == nil {
pkey = key.GetOrGenerate(cmd)
@@ -47,14 +46,7 @@ var containerNodesCmd = &cobra.Command{
commonCmd.ExitOnErr(cmd, "could not build container nodes for given container: %w", err)
for i := range cnrNodes {
- if repNum := policy.ReplicaDescriptor(i).NumberOfObjects(); repNum > 0 {
- cmd.Printf("Descriptor #%d, REP %d:\n", i+1, repNum)
- } else if ecParts := policy.ReplicaDescriptor(i).TotalECPartCount(); ecParts > 0 {
- cmd.Printf("Descriptor #%d, EC %d.%d:\n", i+1, policy.ReplicaDescriptor(i).GetECDataCount(),
- policy.ReplicaDescriptor(i).GetECParityCount())
- } else {
- commonCmd.ExitOnErr(cmd, "%w", errors.New("no replication policy is set"))
- }
+ cmd.Printf("Descriptor #%d, REP %d:\n", i+1, policy.ReplicaNumberByIndex(i))
for j := range cnrNodes[i] {
commonCmd.PrettyPrintNodeInfo(cmd, cnrNodes[i][j], j, "\t", short)
}
diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go
index cf4862b4a..1ac41f08c 100644
--- a/cmd/frostfs-cli/modules/container/policy_playground.go
+++ b/cmd/frostfs-cli/modules/container/policy_playground.go
@@ -1,13 +1,12 @@
package container
import (
+ "bufio"
"encoding/hex"
"encoding/json"
- "errors"
"fmt"
- "maps"
+ "io"
"os"
- "slices"
"strings"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
@@ -15,22 +14,22 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/chzyer/readline"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
type policyPlaygroundREPL struct {
- cmd *cobra.Command
- nodes map[string]netmap.NodeInfo
- console *readline.Instance
+ cmd *cobra.Command
+ args []string
+ nodes map[string]netmap.NodeInfo
}
-func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
+func newPolicyPlaygroundREPL(cmd *cobra.Command, args []string) (*policyPlaygroundREPL, error) {
return &policyPlaygroundREPL{
cmd: cmd,
+ args: args,
nodes: map[string]netmap.NodeInfo{},
- }
+ }, nil
}
func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@@ -40,10 +39,10 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error {
i := 1
for id, node := range repl.nodes {
var attrs []string
- for k, v := range node.Attributes() {
+ node.IterateAttributes(func(k, v string) {
attrs = append(attrs, fmt.Sprintf("%s:%q", k, v))
- }
- fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
+ })
+ fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
i++
}
return nil
@@ -150,29 +149,12 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error {
for _, node := range ns {
ids = append(ids, hex.EncodeToString(node.PublicKey()))
}
- fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids)
+ fmt.Printf("\t%2d: %v\n", i+1, ids)
}
return nil
}
-func (repl *policyPlaygroundREPL) handleHelp(args []string) error {
- if len(args) != 0 {
- if _, ok := commands[args[0]]; !ok {
- return fmt.Errorf("unknown command: %q", args[0])
- }
- fmt.Fprintln(repl.console, commands[args[0]].usage)
- return nil
- }
-
- commandList := slices.Collect(maps.Keys(commands))
- slices.Sort(commandList)
- for _, command := range commandList {
- fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].descriprion)
- }
- return nil
-}
-
func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
var nm netmap.NetMap
var nodes []netmap.NodeInfo
@@ -183,105 +165,6 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
return nm
}
-type commandDescription struct {
- descriprion string
- usage string
-}
-
-var commands = map[string]commandDescription{
- "list": {
- descriprion: "Display all nodes in the netmap",
- usage: `Display all nodes in the netmap
-Example of usage:
- list
- 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
- 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
-`,
- },
-
- "ls": {
- descriprion: "Display all nodes in the netmap",
- usage: `Display all nodes in the netmap
-Example of usage:
- ls
- 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
- 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
-`,
- },
-
- "add": {
- descriprion: "Add a new node: add attr=value",
- usage: `Add a new node
-Example of usage:
- add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`,
- },
-
- "load": {
- descriprion: "Load netmap from file: load ",
- usage: `Load netmap from file
-Example of usage:
- load "netmap.json"
-File format (netmap.json):
-{
- "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": {
- "continent": "Europe",
- "country": "Poland"
- },
- "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": {
- "continent": "Antarctica",
- "country": "Heard Island"
- }
-}`,
- },
-
- "remove": {
- descriprion: "Remove a node: remove ",
- usage: `Remove a node
-Example of usage:
- remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
- },
-
- "rm": {
- descriprion: "Remove a node: rm ",
- usage: `Remove a node
-Example of usage:
- rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
- },
-
- "eval": {
- descriprion: "Evaluate a policy: eval ",
- usage: `Evaluate a policy
-Example of usage:
- eval REP 2`,
- },
-
- "help": {
- descriprion: "Show available commands",
- },
-}
-
-func (repl *policyPlaygroundREPL) handleCommand(args []string) error {
- if len(args) == 0 {
- return nil
- }
-
- switch args[0] {
- case "list", "ls":
- return repl.handleLs(args[1:])
- case "add":
- return repl.handleAdd(args[1:])
- case "load":
- return repl.handleLoad(args[1:])
- case "remove", "rm":
- return repl.handleRemove(args[1:])
- case "eval":
- return repl.handleEval(args[1:])
- case "help":
- return repl.handleHelp(args[1:])
- }
- return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0])
-}
-
func (repl *policyPlaygroundREPL) run() error {
if len(viper.GetString(commonflags.RPC)) > 0 {
key := key.GetOrGenerate(repl.cmd)
@@ -299,51 +182,36 @@ func (repl *policyPlaygroundREPL) run() error {
}
}
- if len(viper.GetString(netmapConfigPath)) > 0 {
- err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)})
- commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err)
+ cmdHandlers := map[string]func([]string) error{
+ "list": repl.handleLs,
+ "ls": repl.handleLs,
+ "add": repl.handleAdd,
+ "load": repl.handleLoad,
+ "remove": repl.handleRemove,
+ "rm": repl.handleRemove,
+ "eval": repl.handleEval,
}
-
- var cfgCompleter []readline.PrefixCompleterInterface
- var helpSubItems []readline.PrefixCompleterInterface
-
- for name := range commands {
- if name != "help" {
- cfgCompleter = append(cfgCompleter, readline.PcItem(name))
- helpSubItems = append(helpSubItems, readline.PcItem(name))
- }
- }
-
- cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...))
- completer := readline.NewPrefixCompleter(cfgCompleter...)
- rl, err := readline.NewEx(&readline.Config{
- Prompt: "> ",
- InterruptPrompt: "^C",
- AutoComplete: completer,
- })
- if err != nil {
- return fmt.Errorf("error initializing readline: %w", err)
- }
- repl.console = rl
- defer rl.Close()
-
- var exit bool
- for {
- line, err := rl.Readline()
+ for reader := bufio.NewReader(os.Stdin); ; {
+ fmt.Print("> ")
+ line, err := reader.ReadString('\n')
if err != nil {
- if errors.Is(err, readline.ErrInterrupt) {
- if exit {
- return nil
- }
- exit = true
- continue
+ if err == io.EOF {
+ return nil
}
- return fmt.Errorf("reading line: %w", err)
+ return fmt.Errorf("reading line: %v", err)
}
- exit = false
-
- if err := repl.handleCommand(strings.Fields(line)); err != nil {
- fmt.Fprintf(repl.console, "error: %v\n", err)
+ parts := strings.Fields(line)
+ if len(parts) == 0 {
+ continue
+ }
+ cmd := parts[0]
+ handler, exists := cmdHandlers[cmd]
+ if exists {
+ if err := handler(parts[1:]); err != nil {
+ fmt.Printf("error: %v\n", err)
+ }
+ } else {
+ fmt.Printf("error: unknown command %q\n", cmd)
}
}
}
@@ -353,20 +221,13 @@ var policyPlaygroundCmd = &cobra.Command{
Short: "A REPL for testing placement policies",
Long: `A REPL for testing placement policies.
If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
- Run: func(cmd *cobra.Command, _ []string) {
- repl := newPolicyPlaygroundREPL(cmd)
+ Run: func(cmd *cobra.Command, args []string) {
+ repl, err := newPolicyPlaygroundREPL(cmd, args)
+ commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
},
}
-const (
- netmapConfigPath = "netmap-config"
- netmapConfigUsage = "Path to the netmap configuration file"
-)
-
func initContainerPolicyPlaygroundCmd() {
commonflags.Init(policyPlaygroundCmd)
- policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage)
-
- _ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath))
}
diff --git a/cmd/frostfs-cli/modules/container/root.go b/cmd/frostfs-cli/modules/container/root.go
index 2da21e767..f3c3e0e3a 100644
--- a/cmd/frostfs-cli/modules/container/root.go
+++ b/cmd/frostfs-cli/modules/container/root.go
@@ -1,6 +1,7 @@
package container
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"github.com/spf13/cobra"
)
@@ -10,12 +11,14 @@ var Cmd = &cobra.Command{
Use: "container",
Short: "Operations with containers",
Long: "Operations with containers",
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
// bind exactly that cmd's flags to
// the viper before execution
commonflags.Bind(cmd)
commonflags.BindAPI(cmd)
+ common.StartClientCommandSpan(cmd)
},
+ PersistentPostRun: common.StopClientCommandSpan,
}
func init() {
@@ -25,6 +28,8 @@ func init() {
deleteContainerCmd,
listContainerObjectsCmd,
getContainerInfoCmd,
+ getExtendedACLCmd,
+ setExtendedACLCmd,
containerNodesCmd,
policyPlaygroundCmd,
}
@@ -36,6 +41,8 @@ func init() {
initContainerDeleteCmd()
initContainerListObjectsCmd()
initContainerInfoCmd()
+ initContainerGetEACLCmd()
+ initContainerSetEACLCmd()
initContainerNodesCmd()
initContainerPolicyPlaygroundCmd()
@@ -49,6 +56,7 @@ func init() {
}{
{createContainerCmd, "PUT"},
{deleteContainerCmd, "DELETE"},
+ {setExtendedACLCmd, "SETEACL"},
} {
commonflags.InitSession(el.cmd, "container "+el.verb)
}
diff --git a/cmd/frostfs-cli/modules/container/set_eacl.go b/cmd/frostfs-cli/modules/container/set_eacl.go
new file mode 100644
index 000000000..f2f60f5c3
--- /dev/null
+++ b/cmd/frostfs-cli/modules/container/set_eacl.go
@@ -0,0 +1,108 @@
+package container
+
+import (
+ "bytes"
+ "errors"
+ "time"
+
+ internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ "github.com/spf13/cobra"
+)
+
+var flagVarsSetEACL struct {
+ noPreCheck bool
+
+ srcPath string
+}
+
+var setExtendedACLCmd = &cobra.Command{
+ Use: "set-eacl",
+ Short: "Set new extended ACL table for container",
+ Long: `Set new extended ACL table for container.
+Container ID in EACL table will be substituted with ID from the CLI.`,
+ Run: func(cmd *cobra.Command, args []string) {
+ id := parseContainerID(cmd)
+ eaclTable := common.ReadEACL(cmd, flagVarsSetEACL.srcPath)
+
+ tok := getSession(cmd)
+
+ eaclTable.SetCID(id)
+
+ pk := key.GetOrGenerate(cmd)
+ cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
+
+ if !flagVarsSetEACL.noPreCheck {
+ cmd.Println("Checking the ability to modify access rights in the container...")
+
+ extendable, err := internalclient.IsACLExtendable(cmd.Context(), cli, id)
+ commonCmd.ExitOnErr(cmd, "Extensibility check failure: %w", err)
+
+ if !extendable {
+ commonCmd.ExitOnErr(cmd, "", errors.New("container ACL is immutable"))
+ }
+
+ cmd.Println("ACL extension is enabled in the container, continue processing.")
+ }
+
+ setEACLPrm := internalclient.SetEACLPrm{
+ Client: cli,
+ ClientParams: client.PrmContainerSetEACL{
+ Table: eaclTable,
+ Session: tok,
+ },
+ }
+
+ _, err := internalclient.SetEACL(cmd.Context(), setEACLPrm)
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ if containerAwait {
+ exp, err := eaclTable.Marshal()
+ commonCmd.ExitOnErr(cmd, "broken EACL table: %w", err)
+
+ cmd.Println("awaiting...")
+
+ getEACLPrm := internalclient.EACLPrm{
+ Client: cli,
+ ClientParams: client.PrmContainerEACL{
+ ContainerID: &id,
+ },
+ }
+
+ for i := 0; i < awaitTimeout; i++ {
+ time.Sleep(1 * time.Second)
+
+ res, err := internalclient.EACL(cmd.Context(), getEACLPrm)
+ if err == nil {
+ // compare binary values because EACL could have been set already
+ table := res.EACL()
+ got, err := table.Marshal()
+ if err != nil {
+ continue
+ }
+
+ if bytes.Equal(exp, got) {
+ cmd.Println("EACL has been persisted on sidechain")
+ return
+ }
+ }
+ }
+
+ commonCmd.ExitOnErr(cmd, "", errSetEACLTimeout)
+ }
+ },
+}
+
+func initContainerSetEACLCmd() {
+ commonflags.Init(setExtendedACLCmd)
+
+ flags := setExtendedACLCmd.Flags()
+ flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
+ flags.StringVar(&flagVarsSetEACL.srcPath, "table", "", "path to file with JSON or binary encoded EACL table")
+ flags.BoolVar(&containerAwait, "await", false, "block execution until EACL is persisted")
+ flags.BoolVar(&flagVarsSetEACL.noPreCheck, "no-precheck", false, "do not pre-check the extensibility of the container ACL")
+}
diff --git a/cmd/frostfs-cli/modules/container/util.go b/cmd/frostfs-cli/modules/container/util.go
index 4cb268ec5..48265f785 100644
--- a/cmd/frostfs-cli/modules/container/util.go
+++ b/cmd/frostfs-cli/modules/container/util.go
@@ -18,8 +18,9 @@ const (
)
var (
- errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain")
- errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain")
+ errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain")
+ errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain")
+ errSetEACLTimeout = errors.New("timeout: EACL has not been persisted on sidechain")
)
func parseContainerID(cmd *cobra.Command) cid.ID {
diff --git a/cmd/frostfs-cli/modules/control/add_rule.go b/cmd/frostfs-cli/modules/control/add_rule.go
deleted file mode 100644
index 42f229ad9..000000000
--- a/cmd/frostfs-cli/modules/control/add_rule.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package control
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "github.com/spf13/cobra"
-)
-
-var addRuleCmd = &cobra.Command{
- Use: "add-rule",
- Short: "Add local override",
- Long: "Add local APE rule to a node with following format:\n[:action_detail] [ ...] ",
- Example: `control add-rule --endpoint ... -w ... --address ... --chain-id ChainID --cid ... --rule "allow Object.Get *"
---rule "deny Object.Get EbxzAdz5LB4uqxuz6crWKAumBNtZyK2rKsqQP7TdZvwr/*"
---rule "deny:QuotaLimitReached Object.Put ResourceCondition:Department=HR *"
-
-control add-rule --endpoint ... -w ... --address ... --chain-id ChainID --cid ... --path some_chain.json
-`,
- Run: addRule,
-}
-
-func addRule(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
-
- target := parseTarget(cmd)
-
- parsed := apeCmd.ParseChain(cmd)
-
- req := &control.AddChainLocalOverrideRequest{
- Body: &control.AddChainLocalOverrideRequest_Body{
- Target: target,
- Chain: parsed.Bytes(),
- },
- }
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.AddChainLocalOverrideResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.AddChainLocalOverride(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
- cmd.Println("\nRule has been added.")
-}
-
-func initControlAddRuleCmd() {
- initControlFlags(addRuleCmd)
-
- ff := addRuleCmd.Flags()
- ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement")
- ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format")
- ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain")
- ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = addRuleCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
- ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex")
-
- addRuleCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag)
-}
diff --git a/cmd/frostfs-cli/modules/control/detach_shards.go b/cmd/frostfs-cli/modules/control/detach_shards.go
deleted file mode 100644
index 025a6e561..000000000
--- a/cmd/frostfs-cli/modules/control/detach_shards.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package control
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "github.com/spf13/cobra"
-)
-
-var shardsDetachCmd = &cobra.Command{
- Use: "detach",
- Short: "Detach and close the shards",
- Long: "Detach and close the shards",
- Run: shardsDetach,
-}
-
-func shardsDetach(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
-
- req := &control.DetachShardsRequest{
- Body: &control.DetachShardsRequest_Body{
- Shard_ID: getShardIDListFromIDFlag(cmd, false),
- },
- }
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.DetachShardsResponse
- var err error
- err = cli.ExecRaw(func(client *rawclient.Client) error {
- resp, err = control.DetachShards(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- cmd.Println("Shard mode update request successfully sent.")
-}
-
-func initControlShardsDetachCmd() {
- initControlFlags(shardsDetachCmd)
-
- flags := shardsDetachCmd.Flags()
- flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
-}
diff --git a/cmd/frostfs-cli/modules/control/doctor.go b/cmd/frostfs-cli/modules/control/doctor.go
index 632cdd6a7..13bb81a0a 100644
--- a/cmd/frostfs-cli/modules/control/doctor.go
+++ b/cmd/frostfs-cli/modules/control/doctor.go
@@ -1,10 +1,10 @@
package control
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
diff --git a/cmd/frostfs-cli/modules/control/drop_objects.go b/cmd/frostfs-cli/modules/control/drop_objects.go
index dcc1c1229..9de3ae8a3 100644
--- a/cmd/frostfs-cli/modules/control/drop_objects.go
+++ b/cmd/frostfs-cli/modules/control/drop_objects.go
@@ -1,10 +1,10 @@
package control
import (
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
@@ -14,7 +14,7 @@ var dropObjectsCmd = &cobra.Command{
Use: "drop-objects",
Short: "Drop objects from the node's local storage",
Long: "Drop objects from the node's local storage",
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
pk := key.Get(cmd)
dropObjectsList, _ := cmd.Flags().GetStringSlice(dropObjectsFlag)
diff --git a/cmd/frostfs-cli/modules/control/evacuate_shard.go b/cmd/frostfs-cli/modules/control/evacuate_shard.go
new file mode 100644
index 000000000..458e4cc0b
--- /dev/null
+++ b/cmd/frostfs-cli/modules/control/evacuate_shard.go
@@ -0,0 +1,56 @@
+package control
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "github.com/spf13/cobra"
+)
+
+const ignoreErrorsFlag = "no-errors"
+
+var evacuateShardCmd = &cobra.Command{
+ Use: "evacuate",
+ Short: "Evacuate objects from shard",
+ Long: "Evacuate objects from shard to other shards",
+ Run: evacuateShard,
+ Deprecated: "use frostfs-cli control shards evacuation start",
+}
+
+func evacuateShard(cmd *cobra.Command, _ []string) {
+ pk := key.Get(cmd)
+
+ req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
+ req.Body.Shard_ID = getShardIDList(cmd)
+ req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
+
+ signRequest(cmd, pk, req)
+
+ cli := getClient(cmd, pk)
+
+ var resp *control.EvacuateShardResponse
+ var err error
+ err = cli.ExecRaw(func(client *client.Client) error {
+ resp, err = control.EvacuateShard(client, req)
+ return err
+ })
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
+
+ verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+ cmd.Println("Shard has successfully been evacuated.")
+}
+
+func initControlEvacuateShardCmd() {
+ initControlFlags(evacuateShardCmd)
+
+ flags := evacuateShardCmd.Flags()
+ flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
+ flags.Bool(shardAllFlag, false, "Process all shards")
+ flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
+
+ evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
+}
diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go
index b8d7eb046..4eb6505cf 100644
--- a/cmd/frostfs-cli/modules/control/evacuation.go
+++ b/cmd/frostfs-cli/modules/control/evacuation.go
@@ -7,28 +7,18 @@ import (
"sync/atomic"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
const (
- awaitFlag = "await"
- noProgressFlag = "no-progress"
- scopeFlag = "scope"
- repOneOnlyFlag = "rep-one-only"
- ignoreErrorsFlag = "no-errors"
-
- containerWorkerCountFlag = "container-worker-count"
- objectWorkerCountFlag = "object-worker-count"
-
- scopeAll = "all"
- scopeObjects = "objects"
- scopeTrees = "trees"
+ awaitFlag = "await"
+ noProgressFlag = "no-progress"
)
var evacuationShardCmd = &cobra.Command{
@@ -58,29 +48,15 @@ var stopEvacuationShardCmd = &cobra.Command{
Run: stopEvacuateShardStatus,
}
-var resetEvacuationStatusShardCmd = &cobra.Command{
- Use: "reset",
- Short: "Reset evacuate objects from shard status",
- Long: "Reset evacuate objects from shard to other shards status",
- Run: resetEvacuateShardStatus,
-}
-
func startEvacuateShard(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
- containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag)
- objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag)
- repOneOnly, _ := cmd.Flags().GetBool(repOneOnlyFlag)
req := &control.StartShardEvacuationRequest{
Body: &control.StartShardEvacuationRequest_Body{
- Shard_ID: getShardIDList(cmd),
- IgnoreErrors: ignoreErrors,
- Scope: getEvacuationScope(cmd),
- ContainerWorkerCount: containerWorkerCount,
- ObjectWorkerCount: objectWorkerCount,
- RepOneOnly: repOneOnly,
+ Shard_ID: getShardIDList(cmd),
+ IgnoreErrors: ignoreErrors,
},
}
@@ -106,22 +82,6 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) {
}
}
-func getEvacuationScope(cmd *cobra.Command) uint32 {
- rawScope, err := cmd.Flags().GetString(scopeFlag)
- commonCmd.ExitOnErr(cmd, "Invalid scope value: %w", err)
- switch rawScope {
- case scopeAll:
- return uint32(control.StartShardEvacuationRequest_Body_OBJECTS) | uint32(control.StartShardEvacuationRequest_Body_TREES)
- case scopeObjects:
- return uint32(control.StartShardEvacuationRequest_Body_OBJECTS)
- case scopeTrees:
- return uint32(control.StartShardEvacuationRequest_Body_TREES)
- default:
- commonCmd.ExitOnErr(cmd, "Invalid scope value: %w", fmt.Errorf("unknown scope %s", rawScope))
- }
- return uint32(control.StartShardEvacuationRequest_Body_NONE)
-}
-
func getEvacuateShardStatus(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
req := &control.GetShardEvacuationStatusRequest{
@@ -170,29 +130,6 @@ func stopEvacuateShardStatus(cmd *cobra.Command, _ []string) {
cmd.Println("Evacuation stopped.")
}
-func resetEvacuateShardStatus(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
- req := &control.ResetShardEvacuationStatusRequest{
- Body: &control.ResetShardEvacuationStatusRequest_Body{},
- }
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.ResetShardEvacuationStatusResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.ResetShardEvacuationStatus(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "Reset shards evacuation status failed, rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- cmd.Println("Shards evacuation status has been reset.")
-}
-
func waitEvacuateCompletion(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *clientSDK.Client, printProgress, printCompleted bool) {
const statusPollingInterval = 1 * time.Second
const reportIntervalSeconds = 5
@@ -282,21 +219,19 @@ func printStatus(cmd *cobra.Command, resp *control.GetShardEvacuationStatusRespo
func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if resp.GetBody().GetStatus() != control.GetShardEvacuationStatusResponse_Body_RUNNING ||
resp.GetBody().GetDuration() == nil ||
- (resp.GetBody().GetTotalObjects() == 0 && resp.GetBody().GetTotalTrees() == 0) ||
- (resp.GetBody().GetEvacuatedObjects()+resp.GetBody().GetFailedObjects()+resp.GetBody().GetSkippedObjects() == 0 &&
- resp.GetBody().GetEvacuatedTrees()+resp.GetBody().GetFailedTrees() == 0) {
+ resp.GetBody().GetTotal() == 0 ||
+ resp.GetBody().GetEvacuated()+resp.GetBody().GetFailed() == 0 {
return
}
durationSeconds := float64(resp.GetBody().GetDuration().GetSeconds())
- evacuated := float64(resp.GetBody().GetEvacuatedObjects() + resp.GetBody().GetFailedObjects() + resp.GetBody().GetSkippedObjects() +
- resp.GetBody().GetEvacuatedTrees() + resp.GetBody().GetFailedTrees())
+ evacuated := float64(resp.GetBody().GetEvacuated() + resp.GetBody().GetFailed())
avgObjEvacuationTimeSeconds := durationSeconds / evacuated
- objectsLeft := float64(resp.GetBody().GetTotalObjects()+resp.GetBody().GetTotalTrees()) - evacuated
+ objectsLeft := float64(resp.GetBody().GetTotal()) - evacuated
leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft
leftMinutes := int(leftSeconds / 60)
- fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes)
+ sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes))
}
func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@@ -305,20 +240,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
hour := int(duration.Seconds() / 3600)
minute := int(duration.Seconds()/60) % 60
second := int(duration.Seconds()) % 60
- fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second)
+ sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second))
}
}
func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if resp.GetBody().GetStartedAt() != nil {
startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC()
- fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339))
+ sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339)))
}
}
func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
- if len(resp.GetBody().GetErrorMessage()) > 0 {
- fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage())
+ if len(resp.Body.GetErrorMessage()) > 0 {
+ sb.WriteString(fmt.Sprintf(" Error: %s.", resp.Body.GetErrorMessage()))
}
}
@@ -332,7 +267,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes
default:
status = "undefined"
}
- fmt.Fprintf(sb, " Status: %s.", status)
+ sb.WriteString(fmt.Sprintf(" Status: %s.", status))
}
func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@@ -350,26 +285,20 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
}
func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
- fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
- resp.GetBody().GetEvacuatedObjects(),
- resp.GetBody().GetTotalObjects(),
- resp.GetBody().GetFailedObjects(),
- resp.GetBody().GetSkippedObjects(),
- resp.GetBody().GetEvacuatedTrees(),
- resp.GetBody().GetTotalTrees(),
- resp.GetBody().GetFailedTrees())
+ sb.WriteString(fmt.Sprintf(" Evacuated %d object out of %d, failed to evacuate %d objects.",
+ resp.GetBody().GetEvacuated(),
+ resp.Body.GetTotal(),
+ resp.Body.GetFailed()))
}
func initControlEvacuationShardCmd() {
evacuationShardCmd.AddCommand(startEvacuationShardCmd)
evacuationShardCmd.AddCommand(getEvacuationShardStatusCmd)
evacuationShardCmd.AddCommand(stopEvacuationShardCmd)
- evacuationShardCmd.AddCommand(resetEvacuationStatusShardCmd)
initControlStartEvacuationShardCmd()
initControlFlags(getEvacuationShardStatusCmd)
initControlFlags(stopEvacuationShardCmd)
- initControlFlags(resetEvacuationStatusShardCmd)
}
func initControlStartEvacuationShardCmd() {
@@ -379,12 +308,8 @@ func initControlStartEvacuationShardCmd() {
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
flags.Bool(shardAllFlag, false, "Process all shards")
flags.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects")
- flags.String(scopeFlag, scopeAll, fmt.Sprintf("Evacuation scope; possible values: %s, %s, %s", scopeTrees, scopeObjects, scopeAll))
flags.Bool(awaitFlag, false, "Block execution until evacuation is completed")
flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag))
- flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers")
- flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers")
- flags.Bool(repOneOnlyFlag, false, "Evacuate objects only from containers with policy 'REP 1 ...'")
startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/flush_cache.go b/cmd/frostfs-cli/modules/control/flush_cache.go
index 280aacfad..48be393dc 100644
--- a/cmd/frostfs-cli/modules/control/flush_cache.go
+++ b/cmd/frostfs-cli/modules/control/flush_cache.go
@@ -1,30 +1,24 @@
package control
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
-const sealFlag = "seal"
-
var flushCacheCmd = &cobra.Command{
- Use: "flush-cache",
- Short: "Flush objects from the write-cache to the main storage",
- Long: "Flush objects from the write-cache to the main storage",
- Run: flushCache,
- Deprecated: "Flushing objects from writecache to the main storage is performed by writecache automatically. To flush and seal writecache use `frostfs-cli control shards writecache seal`.",
+ Use: "flush-cache",
+ Short: "Flush objects from the write-cache to the main storage",
+ Long: "Flush objects from the write-cache to the main storage",
+ Run: flushCache,
}
func flushCache(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
- seal, _ := cmd.Flags().GetBool(sealFlag)
- req := &control.FlushCacheRequest{Body: &control.FlushCacheRequest_Body{
- Seal: seal,
- }}
+ req := &control.FlushCacheRequest{Body: new(control.FlushCacheRequest_Body)}
req.Body.Shard_ID = getShardIDList(cmd)
signRequest(cmd, pk, req)
@@ -50,7 +44,6 @@ func initControlFlushCacheCmd() {
ff := flushCacheCmd.Flags()
ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
ff.Bool(shardAllFlag, false, "Process all shards")
- ff.Bool(sealFlag, false, "Writecache will be left in read-only mode after flush completed")
flushCacheCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/get_rule.go b/cmd/frostfs-cli/modules/control/get_rule.go
deleted file mode 100644
index 4da903a9a..000000000
--- a/cmd/frostfs-cli/modules/control/get_rule.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package control
-
-import (
- "encoding/hex"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "github.com/spf13/cobra"
-)
-
-var getRuleCmd = &cobra.Command{
- Use: "get-rule",
- Short: "Get local override",
- Long: "Get local APE override of the node",
- Run: getRule,
-}
-
-func getRule(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
-
- target := parseTarget(cmd)
-
- chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag)
- hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag)
-
- if hexEncoded {
- chainIDBytes, err := hex.DecodeString(chainID)
- commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
- chainID = string(chainIDBytes)
- }
-
- req := &control.GetChainLocalOverrideRequest{
- Body: &control.GetChainLocalOverrideRequest_Body{
- Target: target,
- ChainId: []byte(chainID),
- },
- }
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.GetChainLocalOverrideResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.GetChainLocalOverride(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- var chain apechain.Chain
- commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(resp.GetBody().GetChain()))
- apecmd.PrintHumanReadableAPEChain(cmd, &chain)
-}
-
-func initControGetRuleCmd() {
- initControlFlags(getRuleCmd)
-
- ff := getRuleCmd.Flags()
- ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc)
- ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc)
- _ = getRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag)
- ff.String(apecmd.ChainIDFlag, "", "Chain id")
- ff.Bool(apecmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex")
-}
diff --git a/cmd/frostfs-cli/modules/control/healthcheck.go b/cmd/frostfs-cli/modules/control/healthcheck.go
index 1d4441f1e..097fba540 100644
--- a/cmd/frostfs-cli/modules/control/healthcheck.go
+++ b/cmd/frostfs-cli/modules/control/healthcheck.go
@@ -1,13 +1,10 @@
package control
import (
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
@@ -27,7 +24,6 @@ func initControlHealthCheckCmd() {
flags := healthCheckCmd.Flags()
flags.Bool(healthcheckIRFlag, false, "Communicate with IR node")
- flags.BoolP(commonflags.QuietFlag, commonflags.QuietFlagShorthand, false, commonflags.QuietFlagUsage)
_ = flags.MarkDeprecated(healthcheckIRFlag, "for health check of inner ring nodes, use the 'control ir healthcheck' command instead.")
}
@@ -54,12 +50,6 @@ func healthCheck(cmd *cobra.Command, args []string) {
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
- if quietFlag, _ := cmd.Flags().GetBool(commonflags.QuietFlag); quietFlag {
- if resp.GetBody().GetHealthStatus() == control.HealthStatus_READY {
- return
- }
- os.Exit(1)
- }
cmd.Printf("Network status: %s\n", resp.GetBody().GetNetmapStatus())
cmd.Printf("Health status: %s\n", resp.GetBody().GetHealthStatus())
diff --git a/cmd/frostfs-cli/modules/control/ir.go b/cmd/frostfs-cli/modules/control/ir.go
index 2a38f1e97..396d5d0a5 100644
--- a/cmd/frostfs-cli/modules/control/ir.go
+++ b/cmd/frostfs-cli/modules/control/ir.go
@@ -1,9 +1,6 @@
package control
-import (
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "github.com/spf13/cobra"
-)
+import "github.com/spf13/cobra"
var irCmd = &cobra.Command{
Use: "ir",
@@ -15,20 +12,8 @@ func initControlIRCmd() {
irCmd.AddCommand(tickEpochCmd)
irCmd.AddCommand(removeNodeCmd)
irCmd.AddCommand(irHealthCheckCmd)
- irCmd.AddCommand(removeContainerCmd)
initControlIRTickEpochCmd()
initControlIRRemoveNodeCmd()
initControlIRHealthCheckCmd()
- initControlIRRemoveContainerCmd()
-}
-
-func printVUB(cmd *cobra.Command, vub uint32) {
- cmd.Printf("Transaction's valid until block is %d\n", vub)
-}
-
-func parseVUB(cmd *cobra.Command) uint32 {
- vub, err := cmd.Flags().GetUint32(irFlagNameVUB)
- commonCmd.ExitOnErr(cmd, "invalid valid until block value: %w", err)
- return vub
}
diff --git a/cmd/frostfs-cli/modules/control/ir_healthcheck.go b/cmd/frostfs-cli/modules/control/ir_healthcheck.go
index 373f21c30..e70538ce2 100644
--- a/cmd/frostfs-cli/modules/control/ir_healthcheck.go
+++ b/cmd/frostfs-cli/modules/control/ir_healthcheck.go
@@ -1,14 +1,11 @@
package control
import (
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
@@ -21,8 +18,6 @@ var irHealthCheckCmd = &cobra.Command{
func initControlIRHealthCheckCmd() {
initControlFlags(irHealthCheckCmd)
- flags := irHealthCheckCmd.Flags()
- flags.BoolP(commonflags.QuietFlag, commonflags.QuietFlagShorthand, false, commonflags.QuietFlagUsage)
}
func irHealthCheck(cmd *cobra.Command, _ []string) {
@@ -44,12 +39,6 @@ func irHealthCheck(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
- if quietFlag, _ := cmd.Flags().GetBool(commonflags.QuietFlag); quietFlag {
- if resp.GetBody().GetHealthStatus() == ircontrol.HealthStatus_READY {
- return
- }
- os.Exit(1)
- }
cmd.Printf("Health status: %s\n", resp.GetBody().GetHealthStatus())
}
diff --git a/cmd/frostfs-cli/modules/control/ir_remove_container.go b/cmd/frostfs-cli/modules/control/ir_remove_container.go
deleted file mode 100644
index 460e299e5..000000000
--- a/cmd/frostfs-cli/modules/control/ir_remove_container.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package control
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/spf13/cobra"
-)
-
-const (
- ownerFlag = "owner"
-)
-
-var removeContainerCmd = &cobra.Command{
- Use: "remove-container",
- Short: "Schedules a container removal",
- Long: `Schedules a container removal via a notary request.
-Container data will be deleted asynchronously by policer.
-To check removal status "frostfs-cli container list" command can be used.`,
- Run: removeContainer,
-}
-
-func initControlIRRemoveContainerCmd() {
- initControlIRFlags(removeContainerCmd)
-
- flags := removeContainerCmd.Flags()
- flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
- flags.String(ownerFlag, "", "Container owner's wallet address.")
- removeContainerCmd.MarkFlagsMutuallyExclusive(commonflags.CIDFlag, ownerFlag)
- removeContainerCmd.MarkFlagsOneRequired(commonflags.CIDFlag, ownerFlag)
-}
-
-func removeContainer(cmd *cobra.Command, _ []string) {
- req := prepareRemoveContainerRequest(cmd)
-
- pk := key.Get(cmd)
- c := getClient(cmd, pk)
-
- commonCmd.ExitOnErr(cmd, "could not sign request: %w", ircontrolsrv.SignMessage(pk, req))
-
- var resp *ircontrol.RemoveContainerResponse
- err := c.ExecRaw(func(client *rawclient.Client) error {
- var err error
- resp, err = ircontrol.RemoveContainer(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "failed to execute request: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- if len(req.GetBody().GetContainerId()) > 0 {
- cmd.Println("Container scheduled to removal")
- } else {
- cmd.Println("User containers sheduled to removal")
- }
- printVUB(cmd, resp.GetBody().GetVub())
-}
-
-func prepareRemoveContainerRequest(cmd *cobra.Command) *ircontrol.RemoveContainerRequest {
- req := &ircontrol.RemoveContainerRequest{
- Body: &ircontrol.RemoveContainerRequest_Body{},
- }
-
- cidStr, err := cmd.Flags().GetString(commonflags.CIDFlag)
- commonCmd.ExitOnErr(cmd, "failed to get cid: ", err)
-
- ownerStr, err := cmd.Flags().GetString(ownerFlag)
- commonCmd.ExitOnErr(cmd, "failed to get owner: ", err)
-
- if len(ownerStr) > 0 {
- var owner user.ID
- commonCmd.ExitOnErr(cmd, "invalid owner ID: %w", owner.DecodeString(ownerStr))
- var ownerID refs.OwnerID
- owner.WriteToV2(&ownerID)
- req.Body.Owner = ownerID.StableMarshal(nil)
- }
-
- if len(cidStr) > 0 {
- var containerID cid.ID
- commonCmd.ExitOnErr(cmd, "invalid container ID: %w", containerID.DecodeString(cidStr))
- req.Body.ContainerId = containerID[:]
- }
-
- req.Body.Vub = parseVUB(cmd)
-
- return req
-}
diff --git a/cmd/frostfs-cli/modules/control/ir_remove_node.go b/cmd/frostfs-cli/modules/control/ir_remove_node.go
index 2fe686d63..f5b968b7f 100644
--- a/cmd/frostfs-cli/modules/control/ir_remove_node.go
+++ b/cmd/frostfs-cli/modules/control/ir_remove_node.go
@@ -4,11 +4,11 @@ import (
"encoding/hex"
"errors"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
@@ -20,7 +20,7 @@ var removeNodeCmd = &cobra.Command{
}
func initControlIRRemoveNodeCmd() {
- initControlIRFlags(removeNodeCmd)
+ initControlFlags(removeNodeCmd)
flags := removeNodeCmd.Flags()
flags.String("node", "", "Node public key as a hex string")
@@ -41,7 +41,6 @@ func removeNode(cmd *cobra.Command, _ []string) {
req := new(ircontrol.RemoveNodeRequest)
req.SetBody(&ircontrol.RemoveNodeRequest_Body{
Key: nodeKey,
- Vub: parseVUB(cmd),
})
commonCmd.ExitOnErr(cmd, "could not sign request: %w", ircontrolsrv.SignMessage(pk, req))
@@ -56,5 +55,4 @@ func removeNode(cmd *cobra.Command, _ []string) {
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Node removed")
- printVUB(cmd, resp.GetBody().GetVub())
}
diff --git a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go
index 5f09e92c1..3e6af0081 100644
--- a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go
+++ b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go
@@ -1,11 +1,11 @@
package control
import (
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
@@ -17,7 +17,7 @@ var tickEpochCmd = &cobra.Command{
}
func initControlIRTickEpochCmd() {
- initControlIRFlags(tickEpochCmd)
+ initControlFlags(tickEpochCmd)
}
func tickEpoch(cmd *cobra.Command, _ []string) {
@@ -25,9 +25,7 @@ func tickEpoch(cmd *cobra.Command, _ []string) {
c := getClient(cmd, pk)
req := new(ircontrol.TickEpochRequest)
- req.SetBody(&ircontrol.TickEpochRequest_Body{
- Vub: parseVUB(cmd),
- })
+ req.SetBody(new(ircontrol.TickEpochRequest_Body))
err := ircontrolsrv.SignMessage(pk, req)
commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)
@@ -42,5 +40,4 @@ func tickEpoch(cmd *cobra.Command, _ []string) {
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Epoch tick requested")
- printVUB(cmd, resp.GetBody().GetVub())
}
diff --git a/cmd/frostfs-cli/modules/control/list_rules.go b/cmd/frostfs-cli/modules/control/list_rules.go
deleted file mode 100644
index a6c65d083..000000000
--- a/cmd/frostfs-cli/modules/control/list_rules.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package control
-
-import (
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "github.com/spf13/cobra"
-)
-
-var listRulesCmd = &cobra.Command{
- Use: "list-rules",
- Short: "List local overrides",
- Long: "List local APE overrides of the node",
- Run: listRules,
-}
-
-var engineToControlSvcType = map[policyengine.TargetType]control.ChainTarget_TargetType{
- policyengine.Namespace: control.ChainTarget_NAMESPACE,
- policyengine.Container: control.ChainTarget_CONTAINER,
- policyengine.User: control.ChainTarget_USER,
- policyengine.Group: control.ChainTarget_GROUP,
-}
-
-func parseTarget(cmd *cobra.Command) *control.ChainTarget {
- target := apeCmd.ParseTarget(cmd)
-
- typ, ok := engineToControlSvcType[target.Type]
- if !ok {
- commonCmd.ExitOnErr(cmd, "%w", fmt.Errorf("unknown type '%c", target.Type))
- }
-
- return &control.ChainTarget{
- Name: target.Name,
- Type: typ,
- }
-}
-
-func listRules(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
-
- target := parseTarget(cmd)
- req := &control.ListChainLocalOverridesRequest{
- Body: &control.ListChainLocalOverridesRequest_Body{
- Target: target,
- },
- }
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.ListChainLocalOverridesResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.ListChainLocalOverrides(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- chains := resp.GetBody().GetChains()
- if len(chains) == 0 {
- cmd.Printf("Local overrides are not defined for the %s.\n", strings.ToLower(target.GetType().String()))
- return
- }
-
- for _, c := range chains {
- var chain apechain.Chain
- commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(c))
- apeCmd.PrintHumanReadableAPEChain(cmd, &chain)
- }
-}
-
-func initControlListRulesCmd() {
- initControlFlags(listRulesCmd)
-
- ff := listRulesCmd.Flags()
- ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc)
- ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc)
- _ = listRulesCmd.MarkFlagRequired(apeCmd.TargetTypeFlag)
-}
diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go
deleted file mode 100644
index 3142d02e7..000000000
--- a/cmd/frostfs-cli/modules/control/list_targets.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package control
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "text/tabwriter"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "github.com/spf13/cobra"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-var listTargetsCmd = &cobra.Command{
- Use: "list-targets",
- Short: "List local targets",
- Long: "List local APE overrides of the node",
- Run: listTargets,
-}
-
-func listTargets(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
-
- chainName := apeCmd.ParseChainName(cmd)
-
- req := &control.ListTargetsLocalOverridesRequest{
- Body: &control.ListTargetsLocalOverridesRequest_Body{
- ChainName: string(chainName),
- },
- }
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.ListTargetsLocalOverridesResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.ListTargetsLocalOverrides(client, req)
- return err
- })
- if err != nil && status.Code(err) == codes.NotFound {
- cmd.Println("Local overrides are not defined for any target.")
- return
- }
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- targets := resp.GetBody().GetTargets()
- if len(targets) == 0 {
- cmd.Println("Local overrides are not defined for any target.")
- return
- }
-
- buf := bytes.NewBuffer(nil)
- tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
- _, _ = tw.Write([]byte("#\tName\tType\n"))
- for i, t := range targets {
- _, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))
- }
- _ = tw.Flush()
- cmd.Print(buf.String())
-}
-
-func initControlListTargetsCmd() {
- initControlFlags(listTargetsCmd)
-
- ff := listTargetsCmd.Flags()
- ff.String(apeCmd.ChainNameFlag, "", apeCmd.ChainNameFlagDesc)
-
- _ = cobra.MarkFlagRequired(ff, apeCmd.ChainNameFlag)
-}
diff --git a/cmd/frostfs-cli/modules/control/locate.go b/cmd/frostfs-cli/modules/control/locate.go
deleted file mode 100644
index 4cb4be539..000000000
--- a/cmd/frostfs-cli/modules/control/locate.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package control
-
-import (
- "bytes"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/mr-tron/base58"
- "github.com/spf13/cobra"
-)
-
-const (
- FullInfoFlag = "full"
- FullInfoFlagUsage = "Print full ShardInfo."
-)
-
-var locateObjectCmd = &cobra.Command{
- Use: "locate-object",
- Short: "List shards storing the object",
- Long: "List shards storing the object",
- Run: locateObject,
-}
-
-func initControlLocateObjectCmd() {
- initControlFlags(locateObjectCmd)
-
- flags := locateObjectCmd.Flags()
-
- flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
- _ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag)
-
- flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
- _ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag)
-
- flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.")
- flags.Bool(FullInfoFlag, false, FullInfoFlagUsage)
-}
-
-func locateObject(cmd *cobra.Command, _ []string) {
- var cnr cid.ID
- var obj oid.ID
-
- _ = object.ReadObjectAddress(cmd, &cnr, &obj)
-
- pk := key.Get(cmd)
-
- body := new(control.ListShardsForObjectRequest_Body)
- body.SetContainerId(cnr.EncodeToString())
- body.SetObjectId(obj.EncodeToString())
- req := new(control.ListShardsForObjectRequest)
- req.SetBody(body)
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var err error
- var resp *control.ListShardsForObjectResponse
- err = cli.ExecRaw(func(client *rawclient.Client) error {
- resp, err = control.ListShardsForObject(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- shardIDs := resp.GetBody().GetShard_ID()
-
- isFull, _ := cmd.Flags().GetBool(FullInfoFlag)
- if !isFull {
- for _, id := range shardIDs {
- cmd.Println(base58.Encode(id))
- }
- return
- }
-
- // get full shard info
- listShardsReq := new(control.ListShardsRequest)
- listShardsReq.SetBody(new(control.ListShardsRequest_Body))
- signRequest(cmd, pk, listShardsReq)
- var listShardsResp *control.ListShardsResponse
- err = cli.ExecRaw(func(client *rawclient.Client) error {
- listShardsResp, err = control.ListShards(client, listShardsReq)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody())
-
- shards := listShardsResp.GetBody().GetShards()
- sortShardsByID(shards)
- shards = filterShards(shards, shardIDs)
-
- isJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
- if isJSON {
- prettyPrintShardsJSON(cmd, shards)
- } else {
- prettyPrintShards(cmd, shards)
- }
-}
-
-func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo {
- var res []control.ShardInfo
- for _, id := range ids {
- for _, inf := range info {
- if bytes.Equal(inf.Shard_ID, id) {
- res = append(res, inf)
- }
- }
- }
- return res
-}
diff --git a/cmd/frostfs-cli/modules/control/rebuild_shards.go b/cmd/frostfs-cli/modules/control/rebuild_shards.go
deleted file mode 100644
index 3df12a15d..000000000
--- a/cmd/frostfs-cli/modules/control/rebuild_shards.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package control
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "github.com/mr-tron/base58"
- "github.com/spf13/cobra"
-)
-
-const (
- fillPercentFlag = "fill_percent"
-)
-
-var shardsRebuildCmd = &cobra.Command{
- Use: "rebuild",
- Short: "Rebuild shards",
- Long: "Rebuild reclaims storage occupied by dead objects and adjusts the storage structure according to the configuration (for blobovnicza only now)",
- Run: shardsRebuild,
-}
-
-func shardsRebuild(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
-
- req := &control.StartShardRebuildRequest{
- Body: &control.StartShardRebuildRequest_Body{
- Shard_ID: getShardIDList(cmd),
- TargetFillPercent: getFillPercentValue(cmd),
- ConcurrencyLimit: getConcurrencyValue(cmd),
- },
- }
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.StartShardRebuildResponse
- var err error
- err = cli.ExecRaw(func(client *rawclient.Client) error {
- resp, err = control.StartShardRebuild(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- var success, failed uint
- for _, res := range resp.GetBody().GetResults() {
- if res.GetSuccess() {
- success++
- cmd.Printf("Shard %s: OK\n", base58.Encode(res.GetShard_ID()))
- } else {
- failed++
- cmd.Printf("Shard %s: failed with error %q\n", base58.Encode(res.GetShard_ID()), res.GetError())
- }
- }
- cmd.Printf("Total: %d success, %d failed\n", success, failed)
-}
-
-func getFillPercentValue(cmd *cobra.Command) uint32 {
- v, _ := cmd.Flags().GetUint32(fillPercentFlag)
- if v <= 0 || v > 100 {
- commonCmd.ExitOnErr(cmd, "invalid fill_percent value", fmt.Errorf("fill_percent value must be (0, 100], current value: %d", v))
- }
- return v
-}
-
-func getConcurrencyValue(cmd *cobra.Command) uint32 {
- v, _ := cmd.Flags().GetUint32(concurrencyFlag)
- if v <= 0 || v > 10000 {
- commonCmd.ExitOnErr(cmd, "invalid concurrency value", fmt.Errorf("concurrency value must be (0, 10 000], current value: %d", v))
- }
- return v
-}
-
-func initControlShardRebuildCmd() {
- initControlFlags(shardsRebuildCmd)
-
- flags := shardsRebuildCmd.Flags()
- flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
- flags.Bool(shardAllFlag, false, "Process all shards")
- flags.Uint32(fillPercentFlag, 80, "Target fill percent to reclaim space")
- flags.Uint32(concurrencyFlag, 20, "Maximum count of concurrently rebuilding files")
- setShardModeCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
-}
diff --git a/cmd/frostfs-cli/modules/control/remove_rule.go b/cmd/frostfs-cli/modules/control/remove_rule.go
deleted file mode 100644
index 036317bcb..000000000
--- a/cmd/frostfs-cli/modules/control/remove_rule.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package control
-
-import (
- "encoding/hex"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "github.com/spf13/cobra"
-)
-
-var (
- errEmptyChainID = errors.New("chain id cannot be empty")
-
- removeRuleCmd = &cobra.Command{
- Use: "remove-rule",
- Short: "Remove local override",
- Long: "Remove local APE override of the node",
- Run: removeRule,
- }
-)
-
-func removeRule(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
- hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag)
- removeAll, _ := cmd.Flags().GetBool(apecmd.AllFlag)
- if removeAll {
- req := &control.RemoveChainLocalOverridesByTargetRequest{
- Body: &control.RemoveChainLocalOverridesByTargetRequest_Body{
- Target: parseTarget(cmd),
- },
- }
- signRequest(cmd, pk, req)
- cli := getClient(cmd, pk)
- var resp *control.RemoveChainLocalOverridesByTargetResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.RemoveChainLocalOverridesByTarget(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
- cmd.Println("All rules have been removed.")
- return
- }
-
- chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag)
- if chainID == "" {
- commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID)
- }
- chainIDRaw := []byte(chainID)
-
- if hexEncoded {
- var err error
- chainIDRaw, err = hex.DecodeString(chainID)
- commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
- }
-
- req := &control.RemoveChainLocalOverrideRequest{
- Body: &control.RemoveChainLocalOverrideRequest_Body{
- Target: parseTarget(cmd),
- ChainId: chainIDRaw,
- },
- }
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.RemoveChainLocalOverrideResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.RemoveChainLocalOverride(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- cmd.Println("Rule has been removed.")
-}
-
-func initControlRemoveRuleCmd() {
- initControlFlags(removeRuleCmd)
-
- ff := removeRuleCmd.Flags()
- ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc)
- ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc)
- _ = removeRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag)
- ff.String(apecmd.ChainIDFlag, "", apecmd.ChainIDFlagDesc)
- ff.Bool(apecmd.ChainIDHexFlag, false, apecmd.ChainIDHexFlagDesc)
- ff.Bool(apecmd.AllFlag, false, "Remove all chains")
- removeRuleCmd.MarkFlagsMutuallyExclusive(apecmd.AllFlag, apecmd.ChainIDFlag)
-}
diff --git a/cmd/frostfs-cli/modules/control/root.go b/cmd/frostfs-cli/modules/control/root.go
index 3abfe80cb..015676185 100644
--- a/cmd/frostfs-cli/modules/control/root.go
+++ b/cmd/frostfs-cli/modules/control/root.go
@@ -10,7 +10,7 @@ var Cmd = &cobra.Command{
Use: "control",
Short: "Operations with storage node",
Long: `Operations with storage node`,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
ff := cmd.Flags()
_ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath))
@@ -34,12 +34,6 @@ func init() {
shardsCmd,
synchronizeTreeCmd,
irCmd,
- addRuleCmd,
- removeRuleCmd,
- listRulesCmd,
- getRuleCmd,
- listTargetsCmd,
- locateObjectCmd,
)
initControlHealthCheckCmd()
@@ -48,10 +42,4 @@ func init() {
initControlShardsCmd()
initControlSynchronizeTreeCmd()
initControlIRCmd()
- initControlAddRuleCmd()
- initControlRemoveRuleCmd()
- initControlListRulesCmd()
- initControGetRuleCmd()
- initControlListTargetsCmd()
- initControlLocateObjectCmd()
}
diff --git a/cmd/frostfs-cli/modules/control/set_netmap_status.go b/cmd/frostfs-cli/modules/control/set_netmap_status.go
index 26a1ba883..3aa741689 100644
--- a/cmd/frostfs-cli/modules/control/set_netmap_status.go
+++ b/cmd/frostfs-cli/modules/control/set_netmap_status.go
@@ -1,18 +1,14 @@
package control
import (
- "crypto/ecdsa"
- "errors"
"fmt"
- "time"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
@@ -22,13 +18,8 @@ const (
netmapStatusOnline = "online"
netmapStatusOffline = "offline"
netmapStatusMaintenance = "maintenance"
-
- maxSetStatusMaxWaitTime = 30 * time.Minute
- setStatusWaitTimeout = 30 * time.Second
)
-var errNetmapStatusAwaitFailed = errors.New("netmap status hasn't changed for 30 minutes")
-
var setNetmapStatusCmd = &cobra.Command{
Use: "set-status",
Short: "Set status of the storage node in FrostFS network map",
@@ -52,8 +43,6 @@ func initControlSetNetmapStatusCmd() {
flags.BoolP(commonflags.ForceFlag, commonflags.ForceFlagShorthand, false,
"Force turning to local maintenance")
-
- flags.Bool(commonflags.AwaitFlag, false, commonflags.AwaitFlagUsage)
}
func setNetmapStatus(cmd *cobra.Command, _ []string) {
@@ -67,27 +56,22 @@ func setNetmapStatus(cmd *cobra.Command, _ []string) {
}
}
- await, _ := cmd.Flags().GetBool(commonflags.AwaitFlag)
- var targetStatus control.NetmapStatus
switch st, _ := cmd.Flags().GetString(netmapStatusFlag); st {
default:
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("unsupported status %s", st))
case netmapStatusOnline:
body.SetStatus(control.NetmapStatus_ONLINE)
printIgnoreForce(control.NetmapStatus_ONLINE)
- targetStatus = control.NetmapStatus_ONLINE
case netmapStatusOffline:
body.SetStatus(control.NetmapStatus_OFFLINE)
printIgnoreForce(control.NetmapStatus_OFFLINE)
- targetStatus = control.NetmapStatus_OFFLINE
case netmapStatusMaintenance:
body.SetStatus(control.NetmapStatus_MAINTENANCE)
if force {
- body.SetForceMaintenance(true)
+ body.SetForceMaintenance()
common.PrintVerbose(cmd, "Local maintenance will be forced.")
}
- targetStatus = control.NetmapStatus_MAINTENANCE
}
req := new(control.SetNetmapStatusRequest)
@@ -108,52 +92,4 @@ func setNetmapStatus(cmd *cobra.Command, _ []string) {
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Network status update request successfully sent.")
-
- if await {
- awaitSetNetmapStatus(cmd, pk, cli, targetStatus)
- }
-}
-
-func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.Client, targetStatus control.NetmapStatus) {
- req := &control.GetNetmapStatusRequest{
- Body: &control.GetNetmapStatusRequest_Body{},
- }
- signRequest(cmd, pk, req)
- var epoch uint64
- var status control.NetmapStatus
- startTime := time.Now()
- cmd.Println("Wait until epoch and netmap status change...")
- for {
- var resp *control.GetNetmapStatusResponse
- var err error
- err = cli.ExecRaw(func(client *rawclient.Client) error {
- resp, err = control.GetNetmapStatus(cmd.Context(), client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err)
-
- if epoch == 0 {
- epoch = resp.GetBody().GetEpoch()
- }
-
- status = resp.GetBody().GetStatus()
- if resp.GetBody().GetEpoch() > epoch {
- epoch = resp.GetBody().GetEpoch()
- cmd.Printf("Epoch changed to %d\n", resp.GetBody().GetEpoch())
- }
-
- if status == targetStatus {
- break
- }
-
- if time.Since(startTime) > maxSetStatusMaxWaitTime {
- commonCmd.ExitOnErr(cmd, "failed to wait netmap status: %w", errNetmapStatusAwaitFailed)
- return
- }
-
- time.Sleep(setStatusWaitTimeout)
-
- cmd.Printf("Current netmap status '%s', target status '%s'\n", status.String(), targetStatus.String())
- }
- cmd.Printf("Netmap status changed to '%s' successfully.\n", status.String())
}
diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go
index 3483f5d62..6208c560b 100644
--- a/cmd/frostfs-cli/modules/control/shards.go
+++ b/cmd/frostfs-cli/modules/control/shards.go
@@ -13,19 +13,15 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() {
shardsCmd.AddCommand(listShardsCmd)
shardsCmd.AddCommand(setShardModeCmd)
+ shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(evacuationShardCmd)
shardsCmd.AddCommand(flushCacheCmd)
shardsCmd.AddCommand(doctorCmd)
- shardsCmd.AddCommand(writecacheShardCmd)
- shardsCmd.AddCommand(shardsDetachCmd)
- shardsCmd.AddCommand(shardsRebuildCmd)
initControlShardsListCmd()
initControlSetShardModeCmd()
+ initControlEvacuateShardCmd()
initControlEvacuationShardCmd()
initControlFlushCacheCmd()
initControlDoctorCmd()
- initControlShardsWritecacheCmd()
- initControlShardsDetachCmd()
- initControlShardRebuildCmd()
}
diff --git a/cmd/frostfs-cli/modules/control/shards_list.go b/cmd/frostfs-cli/modules/control/shards_list.go
index 40d6628ee..4d09667b8 100644
--- a/cmd/frostfs-cli/modules/control/shards_list.go
+++ b/cmd/frostfs-cli/modules/control/shards_list.go
@@ -7,11 +7,11 @@ import (
"sort"
"strings"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
@@ -61,18 +61,16 @@ func listShards(cmd *cobra.Command, _ []string) {
}
}
-func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) {
+func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) {
out := make([]map[string]any, 0, len(ii))
for _, i := range ii {
out = append(out, map[string]any{
- "shard_id": base58.Encode(i.GetShard_ID()),
- "mode": shardModeToString(i.GetMode()),
- "metabase": i.GetMetabasePath(),
- "blobstor": i.GetBlobstor(),
- "writecache": i.GetWritecachePath(),
- "pilorama": i.GetPiloramaPath(),
- "error_count": i.GetErrorCount(),
- "evacuation_in_progress": i.GetEvacuationInProgress(),
+ "shard_id": base58.Encode(i.Shard_ID),
+ "mode": shardModeToString(i.GetMode()),
+ "metabase": i.GetMetabasePath(),
+ "blobstor": i.GetBlobstor(),
+ "writecache": i.GetWritecachePath(),
+ "error_count": i.GetErrorCount(),
})
}
@@ -81,10 +79,10 @@ func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) {
enc.SetIndent("", " ")
commonCmd.ExitOnErr(cmd, "cannot shard info to JSON: %w", enc.Encode(out))
- cmd.Print(buf.String()) // pretty printer emits newline, so no need for Println
+ cmd.Print(buf.String()) // pretty printer emits newline, to no need for Println
}
-func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) {
+func prettyPrintShards(cmd *cobra.Command, ii []*control.ShardInfo) {
for _, i := range ii {
pathPrinter := func(name, path string) string {
if path == "" {
@@ -106,9 +104,8 @@ func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) {
sb.String()+
pathPrinter("Write-cache", i.GetWritecachePath())+
pathPrinter("Pilorama", i.GetPiloramaPath())+
- fmt.Sprintf("Error count: %d\n", i.GetErrorCount())+
- fmt.Sprintf("Evacuation in progress: %t\n", i.GetEvacuationInProgress()),
- base58.Encode(i.GetShard_ID()),
+ fmt.Sprintf("Error count: %d\n", i.GetErrorCount()),
+ base58.Encode(i.Shard_ID),
shardModeToString(i.GetMode()),
)
}
@@ -123,8 +120,8 @@ func shardModeToString(m control.ShardMode) string {
return "unknown"
}
-func sortShardsByID(ii []control.ShardInfo) {
+func sortShardsByID(ii []*control.ShardInfo) {
sort.Slice(ii, func(i, j int) bool {
- return bytes.Compare(ii[i].GetShard_ID(), ii[j].GetShard_ID()) < 0
+ return bytes.Compare(ii[i].Shard_ID, ii[j].Shard_ID) < 0
})
}
diff --git a/cmd/frostfs-cli/modules/control/shards_set_mode.go b/cmd/frostfs-cli/modules/control/shards_set_mode.go
index 8fe01ba30..78f768965 100644
--- a/cmd/frostfs-cli/modules/control/shards_set_mode.go
+++ b/cmd/frostfs-cli/modules/control/shards_set_mode.go
@@ -3,13 +3,13 @@ package control
import (
"bytes"
"fmt"
- "slices"
+ "sort"
"strings"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
@@ -117,10 +117,10 @@ func setShardMode(cmd *cobra.Command, _ []string) {
req.SetBody(body)
body.SetMode(mode)
- body.SetShard_ID(getShardIDList(cmd))
+ body.SetShardIDList(getShardIDList(cmd))
reset, _ := cmd.Flags().GetBool(shardClearErrorsFlag)
- body.SetResetErrorCounter(reset)
+ body.ClearErrorCounter(reset)
signRequest(cmd, pk, req)
@@ -145,17 +145,9 @@ func getShardIDList(cmd *cobra.Command) [][]byte {
return nil
}
- return getShardIDListFromIDFlag(cmd, true)
-}
-
-func getShardIDListFromIDFlag(cmd *cobra.Command, withAllFlag bool) [][]byte {
sidList, _ := cmd.Flags().GetStringSlice(shardIDFlag)
if len(sidList) == 0 {
- if withAllFlag {
- commonCmd.ExitOnErr(cmd, "", fmt.Errorf("either --%s or --%s flag must be provided", shardIDFlag, shardAllFlag))
- } else {
- commonCmd.ExitOnErr(cmd, "", fmt.Errorf("--%s flag value must be provided", shardIDFlag))
- }
+ commonCmd.ExitOnErr(cmd, "", fmt.Errorf("either --%s or --%s flag must be provided", shardIDFlag, shardAllFlag))
}
// We can sort the ID list and perform this check without additional allocations,
@@ -177,6 +169,9 @@ func getShardIDListFromIDFlag(cmd *cobra.Command, withAllFlag bool) [][]byte {
res = append(res, raw)
}
- slices.SortFunc(res, bytes.Compare)
+ sort.Slice(res, func(i, j int) bool {
+ return bytes.Compare(res[i], res[j]) < 0
+ })
+
return res
}
diff --git a/cmd/frostfs-cli/modules/control/synchronize_tree.go b/cmd/frostfs-cli/modules/control/synchronize_tree.go
index 1e4575f49..2287344d6 100644
--- a/cmd/frostfs-cli/modules/control/synchronize_tree.go
+++ b/cmd/frostfs-cli/modules/control/synchronize_tree.go
@@ -4,12 +4,12 @@ import (
"crypto/sha256"
"errors"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
@@ -60,7 +60,7 @@ func synchronizeTree(cmd *cobra.Command, _ []string) {
},
}
- err := ctrlmessage.Sign(pk, req)
+ err := controlSvc.SignMessage(pk, req)
commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)
cli := getClient(cmd, pk)
diff --git a/cmd/frostfs-cli/modules/control/util.go b/cmd/frostfs-cli/modules/control/util.go
index 41d9dbf8a..5ad675c0e 100644
--- a/cmd/frostfs-cli/modules/control/util.go
+++ b/cmd/frostfs-cli/modules/control/util.go
@@ -4,20 +4,16 @@ import (
"crypto/ecdsa"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
"github.com/spf13/cobra"
)
-const (
- irFlagNameVUB = "vub"
-)
-
func initControlFlags(cmd *cobra.Command) {
ff := cmd.Flags()
ff.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
@@ -26,15 +22,8 @@ func initControlFlags(cmd *cobra.Command) {
ff.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage)
}
-func initControlIRFlags(cmd *cobra.Command) {
- initControlFlags(cmd)
-
- ff := cmd.Flags()
- ff.Uint32(irFlagNameVUB, 0, "Valid until block value for notary transaction")
-}
-
-func signRequest(cmd *cobra.Command, pk *ecdsa.PrivateKey, req ctrlmessage.SignedMessage) {
- err := ctrlmessage.Sign(pk, req)
+func signRequest(cmd *cobra.Command, pk *ecdsa.PrivateKey, req controlSvc.SignedMessage) {
+ err := controlSvc.SignMessage(pk, req)
commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)
}
@@ -44,7 +33,7 @@ func verifyResponse(cmd *cobra.Command,
GetSign() []byte
},
body interface {
- MarshalProtobuf([]byte) []byte
+ StableMarshal([]byte) []byte
},
) {
if sigControl == nil {
@@ -60,7 +49,7 @@ func verifyResponse(cmd *cobra.Command,
var sig frostfscrypto.Signature
commonCmd.ExitOnErr(cmd, "can't read signature: %w", sig.ReadFromV2(sigV2))
- if !sig.Verify(body.MarshalProtobuf(nil)) {
+ if !sig.Verify(body.StableMarshal(nil)) {
commonCmd.ExitOnErr(cmd, "", errors.New("invalid response signature"))
}
}
diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go
deleted file mode 100644
index d0c9a641b..000000000
--- a/cmd/frostfs-cli/modules/control/writecache.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package control
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "github.com/mr-tron/base58"
- "github.com/spf13/cobra"
-)
-
-const (
- asyncFlag = "async"
- restoreModeFlag = "restore-mode"
- shrinkFlag = "shrink"
-)
-
-var writecacheShardCmd = &cobra.Command{
- Use: "writecache",
- Short: "Operations with storage node's write-cache",
- Long: "Operations with storage node's write-cache",
-}
-
-var sealWritecacheShardCmd = &cobra.Command{
- Use: "seal",
- Short: "Flush objects from write-cache and move write-cache to degraded read only mode.",
- Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.",
- Run: sealWritecache,
-}
-
-func sealWritecache(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
-
- ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
- async, _ := cmd.Flags().GetBool(asyncFlag)
- restoreMode, _ := cmd.Flags().GetBool(restoreModeFlag)
- shrink, _ := cmd.Flags().GetBool(shrinkFlag)
-
- req := &control.SealWriteCacheRequest{Body: &control.SealWriteCacheRequest_Body{
- Shard_ID: getShardIDList(cmd),
- IgnoreErrors: ignoreErrors,
- Async: async,
- RestoreMode: restoreMode,
- Shrink: shrink,
- }}
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.SealWriteCacheResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.SealWriteCache(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- var success, failed uint
- for _, res := range resp.GetBody().GetResults() {
- if res.GetSuccess() {
- success++
- cmd.Printf("Shard %s: OK\n", base58.Encode(res.GetShard_ID()))
- } else {
- failed++
- cmd.Printf("Shard %s: failed with error %q\n", base58.Encode(res.GetShard_ID()), res.GetError())
- }
- }
- cmd.Printf("Total: %d success, %d failed\n", success, failed)
-}
-
-func initControlShardsWritecacheCmd() {
- writecacheShardCmd.AddCommand(sealWritecacheShardCmd)
-
- initControlFlags(sealWritecacheShardCmd)
-
- ff := sealWritecacheShardCmd.Flags()
- ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
- ff.Bool(shardAllFlag, false, "Process all shards")
- ff.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects")
- ff.Bool(asyncFlag, false, "Run operation in background")
- ff.Bool(restoreModeFlag, false, "Restore writecache's mode after sealing")
- ff.Bool(shrinkFlag, false, "Shrink writecache's internal storage")
-
- sealWritecacheShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
-}
diff --git a/cmd/frostfs-cli/modules/netmap/get_epoch.go b/cmd/frostfs-cli/modules/netmap/get_epoch.go
index 8e60e69bf..1670f16c0 100644
--- a/cmd/frostfs-cli/modules/netmap/get_epoch.go
+++ b/cmd/frostfs-cli/modules/netmap/get_epoch.go
@@ -12,7 +12,7 @@ var getEpochCmd = &cobra.Command{
Use: "epoch",
Short: "Get current epoch number",
Long: "Get current epoch number",
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
p := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, p, commonflags.RPC)
diff --git a/cmd/frostfs-cli/modules/netmap/netinfo.go b/cmd/frostfs-cli/modules/netmap/netinfo.go
index 2dbd72355..ceea6a890 100644
--- a/cmd/frostfs-cli/modules/netmap/netinfo.go
+++ b/cmd/frostfs-cli/modules/netmap/netinfo.go
@@ -16,7 +16,7 @@ var netInfoCmd = &cobra.Command{
Use: "netinfo",
Short: "Get information about FrostFS network",
Long: "Get information about FrostFS network",
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
p := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, p, commonflags.RPC)
@@ -43,8 +43,6 @@ var netInfoCmd = &cobra.Command{
cmd.Printf(format, "Epoch duration", netInfo.EpochDuration())
cmd.Printf(format, "Inner Ring candidate fee", netInfo.IRCandidateFee())
cmd.Printf(format, "Maximum object size", netInfo.MaxObjectSize())
- cmd.Printf(format, "Maximum count of data shards", netInfo.MaxECDataCount())
- cmd.Printf(format, "Maximum count of parity shards", netInfo.MaxECParityCount())
cmd.Printf(format, "Withdrawal fee", netInfo.WithdrawalFee())
cmd.Printf(format, "Homomorphic hashing disabled", netInfo.HomomorphicHashingDisabled())
cmd.Printf(format, "Maintenance mode allowed", netInfo.MaintenanceModeAllowed())
diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go
index 5da66dcd9..bf73286ed 100644
--- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go
+++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go
@@ -18,7 +18,7 @@ var nodeInfoCmd = &cobra.Command{
Use: "nodeinfo",
Short: "Get target node info",
Long: `Get target node info`,
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
p := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, p, commonflags.RPC)
@@ -49,24 +49,24 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) {
cmd.Println("key:", hex.EncodeToString(i.PublicKey()))
var stateWord string
- switch i.Status() {
+ switch {
default:
stateWord = ""
- case netmap.Online:
+ case i.IsOnline():
stateWord = "online"
- case netmap.Offline:
+ case i.IsOffline():
stateWord = "offline"
- case netmap.Maintenance:
+ case i.IsMaintenance():
stateWord = "maintenance"
}
cmd.Println("state:", stateWord)
- for s := range i.NetworkEndpoints() {
+ netmap.IterateNetworkEndpoints(i, func(s string) {
cmd.Println("address:", s)
- }
+ })
- for key, value := range i.Attributes() {
+ i.IterateAttributes(func(key, value string) {
cmd.Printf("attribute: %s=%s\n", key, value)
- }
+ })
}
diff --git a/cmd/frostfs-cli/modules/netmap/root.go b/cmd/frostfs-cli/modules/netmap/root.go
index b4f5897e5..006ac6d9f 100644
--- a/cmd/frostfs-cli/modules/netmap/root.go
+++ b/cmd/frostfs-cli/modules/netmap/root.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"github.com/spf13/cobra"
)
@@ -9,12 +10,14 @@ var Cmd = &cobra.Command{
Use: "netmap",
Short: "Operations with Network Map",
Long: `Operations with Network Map`,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
// bind exactly that cmd's flags to
// the viper before execution
commonflags.Bind(cmd)
commonflags.BindAPI(cmd)
+ common.StartClientCommandSpan(cmd)
},
+ PersistentPostRun: common.StopClientCommandSpan,
}
func init() {
diff --git a/cmd/frostfs-cli/modules/netmap/snapshot.go b/cmd/frostfs-cli/modules/netmap/snapshot.go
index 650d8a1b9..eaaf598b9 100644
--- a/cmd/frostfs-cli/modules/netmap/snapshot.go
+++ b/cmd/frostfs-cli/modules/netmap/snapshot.go
@@ -12,7 +12,7 @@ var snapshotCmd = &cobra.Command{
Use: "snapshot",
Short: "Request current local snapshot of the network map",
Long: `Request current local snapshot of the network map`,
- Run: func(cmd *cobra.Command, _ []string) {
+ Run: func(cmd *cobra.Command, args []string) {
p := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, p, commonflags.RPC)
diff --git a/cmd/frostfs-cli/modules/object/delete.go b/cmd/frostfs-cli/modules/object/delete.go
index 08a9ac4c8..e4e9cddb8 100644
--- a/cmd/frostfs-cli/modules/object/delete.go
+++ b/cmd/frostfs-cli/modules/object/delete.go
@@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag))
}
- objAddr = ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr = readObjectAddress(cmd, &cnr, &obj)
}
pk := key.GetOrGenerate(cmd)
diff --git a/cmd/frostfs-cli/modules/object/get.go b/cmd/frostfs-cli/modules/object/get.go
index 7312f5384..3136f086f 100644
--- a/cmd/frostfs-cli/modules/object/get.go
+++ b/cmd/frostfs-cli/modules/object/get.go
@@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr := readObjectAddress(cmd, &cnr, &obj)
filename := cmd.Flag(fileFlag).Value.String()
out, closer := createOutWriter(cmd, filename)
@@ -99,10 +99,6 @@ func getObject(cmd *cobra.Command, _ []string) {
return
}
- if ok := printECInfoErr(cmd, err); ok {
- return
- }
-
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
}
@@ -136,7 +132,7 @@ func createOutWriter(cmd *cobra.Command, filename string) (out io.Writer, closer
out = os.Stdout
closer = func() {}
} else {
- f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
+ f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't open file '%s': %w", filename, err))
}
diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go
index 25df375d4..26243e7e7 100644
--- a/cmd/frostfs-cli/modules/object/hash.go
+++ b/cmd/frostfs-cli/modules/object/hash.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
@@ -41,9 +42,7 @@ func initObjectHashCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
- flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...")
- _ = objectHashCmd.MarkFlagRequired("range")
-
+ flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
}
@@ -52,7 +51,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@@ -67,6 +66,36 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
+ tz := typ == hashTz
+ fullHash := len(ranges) == 0
+ if fullHash {
+ var headPrm internalclient.HeadObjectPrm
+ headPrm.SetClient(cli)
+ Prepare(cmd, &headPrm)
+ headPrm.SetAddress(objAddr)
+
+ // get hash of full payload through HEAD (may be user can do it through dedicated command?)
+ res, err := internalclient.HeadObject(cmd.Context(), headPrm)
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ var cs checksum.Checksum
+ var csSet bool
+
+ if tz {
+ cs, csSet = res.Header().PayloadHomomorphicHash()
+ } else {
+ cs, csSet = res.Header().PayloadChecksum()
+ }
+
+ if csSet {
+ cmd.Println(hex.EncodeToString(cs.Value()))
+ } else {
+ cmd.Println("Missing checksum in object header.")
+ }
+
+ return
+ }
+
var hashPrm internalclient.HashPayloadRangesPrm
hashPrm.SetClient(cli)
Prepare(cmd, &hashPrm)
@@ -75,7 +104,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
hashPrm.SetSalt(salt)
hashPrm.SetRanges(ranges)
- if typ == hashTz {
+ if tz {
hashPrm.TZ()
}
diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go
index 97e996cad..db466e588 100644
--- a/cmd/frostfs-cli/modules/object/head.go
+++ b/cmd/frostfs-cli/modules/object/head.go
@@ -6,12 +6,12 @@ import (
"fmt"
"os"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -38,6 +38,7 @@ func initObjectHeadCmd() {
_ = objectHeadCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.String(fileFlag, "", "File to write header to. Default: stdout.")
+ flags.Bool("main-only", false, "Return only main fields")
flags.Bool(commonflags.JSON, false, "Marshal output in JSON")
flags.Bool("proto", false, "Marshal output in Protobuf")
flags.Bool(rawFlag, false, rawFlagDesc)
@@ -47,7 +48,8 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr := readObjectAddress(cmd, &cnr, &obj)
+ mainOnly, _ := cmd.Flags().GetBool("main-only")
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
@@ -60,6 +62,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
raw, _ := cmd.Flags().GetBool(rawFlag)
prm.SetRawFlag(raw)
prm.SetAddress(objAddr)
+ prm.SetMainOnlyFlag(mainOnly)
res, err := internalclient.HeadObject(cmd.Context(), prm)
if err != nil {
@@ -67,10 +70,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
return
}
- if ok := printECInfoErr(cmd, err); ok {
- return
- }
-
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
}
@@ -172,15 +171,6 @@ func printHeader(cmd *cobra.Command, obj *objectSDK.Object) error {
cmd.Printf(" signature: %s\n", hex.EncodeToString(sigV2.GetSign()))
}
- if ecHeader := obj.ECHeader(); ecHeader != nil {
- cmd.Print("EC header:\n")
-
- cmd.Printf(" parent object ID: %s\n", ecHeader.Parent().EncodeToString())
- cmd.Printf(" index: %d\n", ecHeader.Index())
- cmd.Printf(" total: %d\n", ecHeader.Total())
- cmd.Printf(" header length: %d\n", ecHeader.HeaderLength())
- }
-
return printSplitHeader(cmd, obj)
}
diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go
index d67db9f0d..fa1898586 100644
--- a/cmd/frostfs-cli/modules/object/lock.go
+++ b/cmd/frostfs-cli/modules/object/lock.go
@@ -7,18 +7,17 @@ import (
"strconv"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
- "github.com/spf13/viper"
)
// object lock command.
@@ -79,7 +78,7 @@ var objectLockCmd = &cobra.Command{
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
- endpoint := viper.GetString(commonflags.RPC)
+ endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint)
commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err)
@@ -95,7 +94,7 @@ var objectLockCmd = &cobra.Command{
obj := objectSDK.New()
obj.SetContainerID(cnr)
- obj.SetOwnerID(idOwner)
+ obj.SetOwnerID(&idOwner)
obj.SetType(objectSDK.TypeLock)
obj.SetAttributes(expirationAttr)
obj.SetPayload(lock.Marshal())
diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go
index 476238651..5499e5eec 100644
--- a/cmd/frostfs-cli/modules/object/nodes.go
+++ b/cmd/frostfs-cli/modules/object/nodes.go
@@ -4,17 +4,16 @@ import (
"context"
"crypto/ecdsa"
"encoding/hex"
- "encoding/json"
"errors"
"fmt"
- "slices"
+ "strconv"
"sync"
+ "text/tabwriter"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -28,56 +27,19 @@ import (
)
const (
- verifyPresenceAllFlag = "verify-presence-all"
- preferInternalAddressesFlag = "prefer-internal-addresses"
+ verifyPresenceAllFlag = "verify-presence-all"
)
-var (
- errNoAvailableEndpoint = errors.New("failed to create client: no available endpoint")
- errMalformedComplexObject = errors.New("object consists of EC and non EC parts")
-)
-
-type phyObject struct {
- containerID cid.ID
- objectID oid.ID
- storedOnAllContainerNodes bool
- ecHeader *ecHeader
+type objectNodesInfo struct {
+ containerID cid.ID
+ objectID oid.ID
+ relatedObjectIDs []oid.ID
+ isLock bool
}
-type ecHeader struct {
- index uint32
- parent oid.ID
-}
-
-type objectCounter struct {
- sync.Mutex
- total uint32
- isECcounted bool
-}
-
-type objectPlacement struct {
- requiredNodes []netmapSDK.NodeInfo
- confirmedNodes []netmapSDK.NodeInfo
-}
-
-type objectNodesResult struct {
- errors []error
- placements map[oid.ID]objectPlacement
- total uint32
-}
-
-type ObjNodesDataObject struct {
- ObjectID string `json:"object_id"`
- RequiredNodes []string `json:"required_nodes,omitempty"`
- ConfirmedNodes []string `json:"confirmed_nodes,omitempty"`
- ECParentObjectID *string `json:"ec_parent_object_id,omitempty"`
- ECIndex *uint32 `json:"ec_index,omitempty"`
-}
-
-type objNodesResultJSON struct {
- ObjectID string `json:"object_id"`
- DataObjects []ObjNodesDataObject `json:"data_objects,omitempty"`
- Errors []string `json:"errors,omitempty"`
+type boolError struct {
+ value bool
+ err error
}
var objectNodesCmd = &cobra.Command{
@@ -85,7 +47,7 @@ var objectNodesCmd = &cobra.Command{
Short: "List of nodes where the object is stored",
Long: `List of nodes where the object should be stored and where it is actually stored.
Lock objects must exist on all nodes of the container.
- For complex and EC objects, a node is considered to store an object if the node stores at least one part of the complex object or one chunk of the EC object.
+ For complex objects, a node is considered to store an object if the node stores at least one part of the complex object.
By default, the actual storage of the object is checked only on the nodes that should store the object. To check all nodes, use the flag --verify-presence-all.`,
Run: objectNodes,
}
@@ -101,31 +63,29 @@ func initObjectNodesCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectGetCmd.MarkFlagRequired(commonflags.OIDFlag)
- flags.Bool(verifyPresenceAllFlag, false, "Verify the actual presence of the object on all netmap nodes.")
- flags.Bool(commonflags.JSON, false, "Print information about the object placement as json.")
- flags.Bool(preferInternalAddressesFlag, false, "Use internal addresses first to get object info.")
+ flags.Bool("verify-presence-all", false, "Verify the actual presence of the object on all netmap nodes")
}
func objectNodes(cmd *cobra.Command, _ []string) {
var cnrID cid.ID
var objID oid.ID
- ReadObjectAddress(cmd, &cnrID, &objID)
+ readObjectAddress(cmd, &cnrID, &objID)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
- objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk)
+ objectInfo := getObjectInfo(cmd, cnrID, objID, cli, pk)
placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli)
- result := getRequiredPlacement(cmd, objects, placementPolicy, netmap)
+ requiredPlacement := getRequiredPlacement(cmd, objectInfo, placementPolicy, netmap)
- getActualPlacement(cmd, netmap, pk, objects, count, result)
+ actualPlacement := getActualPlacement(cmd, netmap, requiredPlacement, pk, objectInfo)
- printPlacement(cmd, objID, objects, result)
+ printPlacement(cmd, netmap, requiredPlacement, actualPlacement)
}
-func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) {
+func getObjectInfo(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) *objectNodesInfo {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@@ -140,142 +100,44 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
res, err := internalclient.HeadObject(cmd.Context(), prmHead)
if err == nil {
- obj := phyObject{
+ return &objectNodesInfo{
containerID: cnrID,
objectID: objID,
- storedOnAllContainerNodes: res.Header().Type() == objectSDK.TypeLock ||
- res.Header().Type() == objectSDK.TypeTombstone ||
- len(res.Header().Children()) > 0,
+ isLock: res.Header().Type() == objectSDK.TypeLock,
}
- if res.Header().ECHeader() != nil {
- obj.ecHeader = &ecHeader{
- index: res.Header().ECHeader().Index(),
- parent: res.Header().ECHeader().Parent(),
- }
- }
- return []phyObject{obj}, 1
}
var errSplitInfo *objectSDK.SplitInfoError
- if errors.As(err, &errSplitInfo) {
- return getComplexObjectParts(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
+
+ if !errors.As(err, &errSplitInfo) {
+ commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
+ return nil
}
- var ecInfoError *objectSDK.ECInfoError
- if errors.As(err, &ecInfoError) {
- return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1
- }
- commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
- return nil, 0
-}
-
-func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) {
- members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
- return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total
-}
-
-func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) {
- var total int
splitInfo := errSplitInfo.SplitInfo()
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
- if total = len(members); total > 0 {
- total-- // linking object is not data object
+ return &objectNodesInfo{
+ containerID: cnrID,
+ objectID: objID,
+ relatedObjectIDs: members,
}
- return members, total
}
if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok {
- return members, len(members)
+ return &objectNodesInfo{
+ containerID: cnrID,
+ objectID: objID,
+ relatedObjectIDs: members,
+ }
}
members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
- return members, len(members)
-}
-
-func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
- result := make([]phyObject, 0, len(members))
- var hasNonEC, hasEC bool
- var resultGuard sync.Mutex
-
- if len(members) == 0 {
- return result
+ return &objectNodesInfo{
+ containerID: cnrID,
+ objectID: objID,
+ relatedObjectIDs: members,
}
-
- prmHead.SetRawFlag(true) // to get an error instead of whole object
-
- eg, egCtx := errgroup.WithContext(cmd.Context())
- for idx := range members {
- partObjID := members[idx]
-
- eg.Go(func() error {
- partHeadPrm := prmHead
- var partAddr oid.Address
- partAddr.SetContainer(cnrID)
- partAddr.SetObject(partObjID)
- partHeadPrm.SetAddress(partAddr)
-
- obj, err := internalclient.HeadObject(egCtx, partHeadPrm)
- if err != nil {
- var ecInfoError *objectSDK.ECInfoError
- if errors.As(err, &ecInfoError) {
- resultGuard.Lock()
- defer resultGuard.Unlock()
- result = append(result, getECObjectChunks(cmd, cnrID, partObjID, ecInfoError)...)
- hasEC = true
- return nil
- }
- return err
- }
-
- if obj.Header().Type() != objectSDK.TypeRegular {
- commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", fmt.Errorf("object '%s' with type '%s' is not supported as part of complex object", partAddr, obj.Header().Type()))
- }
-
- if len(obj.Header().Children()) > 0 {
- // linking object is not data object, so skip it
- return nil
- }
-
- resultGuard.Lock()
- defer resultGuard.Unlock()
- result = append(result, phyObject{
- containerID: cnrID,
- objectID: partObjID,
- })
- hasNonEC = true
-
- return nil
- })
- }
-
- commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", eg.Wait())
- if hasEC && hasNonEC {
- commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", errMalformedComplexObject)
- }
- return result
-}
-
-func getECObjectChunks(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, errECInfo *objectSDK.ECInfoError) []phyObject {
- ecInfo := errECInfo.ECInfo()
- result := make([]phyObject, 0, len(ecInfo.Chunks))
- for _, ch := range ecInfo.Chunks {
- var chID oid.ID
- err := chID.ReadFromV2(ch.ID)
- if err != nil {
- commonCmd.ExitOnErr(cmd, "failed to read EC chunk ID %w", err)
- return nil
- }
- result = append(result, phyObject{
- containerID: cnrID,
- objectID: chID,
- ecHeader: &ecHeader{
- index: ch.Index,
- parent: objID,
- },
- })
- }
- return result
}
func getPlacementPolicyAndNetmap(cmd *cobra.Command, cnrID cid.ID, cli *client.Client) (placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) {
@@ -320,114 +182,87 @@ func getNetMap(ctx context.Context, cli *client.Client) (*netmapSDK.NetMap, erro
return &nm, nil
}
-func getRequiredPlacement(cmd *cobra.Command, objects []phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) *objectNodesResult {
- if policy.IsECPlacement(placementPolicy) {
- return getECRequiredPlacement(cmd, objects, placementPolicy, netmap)
- }
- return getReplicaRequiredPlacement(cmd, objects, placementPolicy, netmap)
-}
-
-func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) *objectNodesResult {
- result := &objectNodesResult{
- placements: make(map[oid.ID]objectPlacement),
- }
+func getRequiredPlacement(cmd *cobra.Command, objInfo *objectNodesInfo, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) map[uint64]netmapSDK.NodeInfo {
+ nodes := make(map[uint64]netmapSDK.NodeInfo)
placementBuilder := placement.NewNetworkMapBuilder(netmap)
- for _, object := range objects {
- placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy)
- commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err)
- for repIdx, rep := range placement {
- numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects()
- var nodeIdx uint32
- for _, n := range rep {
- if !object.storedOnAllContainerNodes && nodeIdx == numOfReplicas {
- break
- }
-
- op := result.placements[object.objectID]
- op.requiredNodes = append(op.requiredNodes, n)
- result.placements[object.objectID] = op
-
- nodeIdx++
- }
- }
- }
-
- return result
-}
-
-func getECRequiredPlacement(cmd *cobra.Command, objects []phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) *objectNodesResult {
- result := &objectNodesResult{
- placements: make(map[oid.ID]objectPlacement),
- }
- for _, object := range objects {
- getECRequiredPlacementInternal(cmd, object, placementPolicy, netmap, result)
- }
- return result
-}
-
-func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap, result *objectNodesResult) {
- placementObjectID := object.objectID
- if object.ecHeader != nil {
- placementObjectID = object.ecHeader.parent
- }
- placementBuilder := placement.NewNetworkMapBuilder(netmap)
- placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy)
+ placement, err := placementBuilder.BuildPlacement(objInfo.containerID, &objInfo.objectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)
-
- for _, vector := range placement {
- if object.storedOnAllContainerNodes {
- for _, node := range vector {
- op := result.placements[object.objectID]
- op.requiredNodes = append(op.requiredNodes, node)
- result.placements[object.objectID] = op
+ for repIdx, rep := range placement {
+ numOfReplicas := placementPolicy.ReplicaNumberByIndex(repIdx)
+ var nodeIdx uint32
+ for _, n := range rep {
+ if !objInfo.isLock && nodeIdx == numOfReplicas { //lock object should be on all container nodes
+ break
}
- continue
- }
-
- if object.ecHeader != nil {
- chunkIdx := int(object.ecHeader.index)
- nodeIdx := chunkIdx % len(vector)
- node := vector[nodeIdx]
-
- op := result.placements[object.objectID]
- op.requiredNodes = append(op.requiredNodes, node)
- result.placements[object.objectID] = op
+ nodes[n.Hash()] = n
+ nodeIdx++
}
}
+
+ for _, relatedObjID := range objInfo.relatedObjectIDs {
+ placement, err = placementBuilder.BuildPlacement(objInfo.containerID, &relatedObjID, placementPolicy)
+ commonCmd.ExitOnErr(cmd, "failed to get required placement for related object: %w", err)
+ for _, rep := range placement {
+ for _, n := range rep {
+ nodes[n.Hash()] = n
+ }
+ }
+ }
+
+ return nodes
}
-func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) {
+func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPlacement map[uint64]netmapSDK.NodeInfo,
+ pk *ecdsa.PrivateKey, objInfo *objectNodesInfo) map[uint64]boolError {
+ result := make(map[uint64]boolError)
resultMtx := &sync.Mutex{}
- counter := &objectCounter{
- total: uint32(count),
- }
- candidates := getNodesToCheckObjectExistance(cmd, netmap, result)
+ var candidates []netmapSDK.NodeInfo
+ checkAllNodes, _ := cmd.Flags().GetBool(verifyPresenceAllFlag)
+ if checkAllNodes {
+ candidates = netmap.Nodes()
+ } else {
+ for _, n := range requiredPlacement {
+ candidates = append(candidates, n)
+ }
+ }
eg, egCtx := errgroup.WithContext(cmd.Context())
for _, cand := range candidates {
+ cand := cand
+
eg.Go(func() error {
cli, err := createClient(egCtx, cmd, cand, pk)
if err != nil {
resultMtx.Lock()
defer resultMtx.Unlock()
- result.errors = append(result.errors, fmt.Errorf("failed to connect to node %s: %w", hex.EncodeToString(cand.PublicKey()), err))
+ result[cand.Hash()] = boolError{err: err}
return nil
}
- for _, object := range objects {
+ eg.Go(func() error {
+ var v boolError
+ v.value, v.err = isObjectStoredOnNode(egCtx, cmd, objInfo.containerID, objInfo.objectID, cli, pk)
+ resultMtx.Lock()
+ defer resultMtx.Unlock()
+ if prev, exists := result[cand.Hash()]; exists && (prev.err != nil || prev.value) {
+ return nil
+ }
+ result[cand.Hash()] = v
+ return nil
+ })
+
+ for _, rObjID := range objInfo.relatedObjectIDs {
+ rObjID := rObjID
eg.Go(func() error {
- stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter)
+ var v boolError
+ v.value, v.err = isObjectStoredOnNode(egCtx, cmd, objInfo.containerID, rObjID, cli, pk)
resultMtx.Lock()
defer resultMtx.Unlock()
- if err == nil && stored {
- op := result.placements[object.objectID]
- op.confirmedNodes = append(op.confirmedNodes, cand)
- result.placements[object.objectID] = op
- }
- if err != nil {
- result.errors = append(result.errors, fmt.Errorf("failed to check object %s existence on node %s: %w", object.objectID.EncodeToString(), hex.EncodeToString(cand.PublicKey()), err))
+ if prev, exists := result[cand.Hash()]; exists && (prev.err != nil || prev.value) {
+ return nil
}
+ result[cand.Hash()] = v
return nil
})
}
@@ -436,38 +271,17 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
}
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait())
- result.total = counter.total
-}
-
-func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo {
- checkAllNodes, _ := cmd.Flags().GetBool(verifyPresenceAllFlag)
- if checkAllNodes {
- return netmap.Nodes()
- }
- var nodes []netmapSDK.NodeInfo
- visited := make(map[uint64]struct{})
- for _, p := range result.placements {
- for _, node := range p.requiredNodes {
- if _, ok := visited[node.Hash()]; !ok {
- nodes = append(nodes, node)
- visited[node.Hash()] = struct{}{}
- }
- }
- }
- return nodes
+ return result
}
func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.NodeInfo, pk *ecdsa.PrivateKey) (*client.Client, error) {
var cli *client.Client
var addresses []string
- if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal {
- addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints())
- addresses = append(addresses, candidate.ExternalAddresses()...)
- } else {
- addresses = append(addresses, candidate.ExternalAddresses()...)
- addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints())
- }
-
+ candidate.IterateNetworkEndpoints(func(s string) bool {
+ addresses = append(addresses, s)
+ return false
+ })
+ addresses = append(addresses, candidate.ExternalAddresses()...)
var lastErr error
for _, address := range addresses {
var networkAddr network.Address
@@ -484,12 +298,12 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
return nil, lastErr
}
if cli == nil {
- return nil, errNoAvailableEndpoint
+ return nil, fmt.Errorf("failed to create client: no available endpoint")
}
return cli, nil
}
-func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) {
+func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@@ -504,14 +318,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
res, err := internalclient.HeadObject(ctx, prmHead)
if err == nil && res != nil {
- if res.Header().ECHeader() != nil {
- counter.Lock()
- defer counter.Unlock()
- if !counter.isECcounted {
- counter.total *= res.Header().ECHeader().Total()
- }
- counter.isECcounted = true
- }
return true, nil
}
var notFound *apistatus.ObjectNotFound
@@ -522,81 +328,24 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
return false, err
}
-func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
- if json, _ := cmd.Flags().GetBool(commonflags.JSON); json {
- printObjectNodesAsJSON(cmd, objID, objects, result)
- } else {
- printObjectNodesAsText(cmd, objID, objects, result)
- }
-}
-
-func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
- fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total)
- fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects))
-
- for _, object := range objects {
- fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID)
- if object.ecHeader != nil {
- fmt.Fprintf(cmd.OutOrStdout(), "\tEC index: %d\n", object.ecHeader.index)
- fmt.Fprintf(cmd.OutOrStdout(), "\tEC parent: %s\n", object.ecHeader.parent.EncodeToString())
- }
- op, ok := result.placements[object.objectID]
- if !ok {
- continue
- }
- if len(op.requiredNodes) > 0 {
- fmt.Fprintf(cmd.OutOrStdout(), "\tRequired nodes:\n")
- for _, node := range op.requiredNodes {
- fmt.Fprintf(cmd.OutOrStdout(), "\t\t- %s\n", hex.EncodeToString(node.PublicKey()))
+func printPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPlacement map[uint64]netmapSDK.NodeInfo, actualPlacement map[uint64]boolError) {
+ w := tabwriter.NewWriter(cmd.OutOrStdout(), 0, 0, 1, ' ', tabwriter.AlignRight|tabwriter.Debug)
+ defer func() {
+ commonCmd.ExitOnErr(cmd, "failed to print placement info: %w", w.Flush())
+ }()
+ fmt.Fprintln(w, "Node ID\tShould contain object\tActually contains object\t")
+ for _, n := range netmap.Nodes() {
+ nodeID := hex.EncodeToString(n.PublicKey())
+ _, required := requiredPlacement[n.Hash()]
+ actual, actualExists := actualPlacement[n.Hash()]
+ actualStr := ""
+ if actualExists {
+ if actual.err != nil {
+ actualStr = fmt.Sprintf("error: %v", actual.err)
+ } else {
+ actualStr = strconv.FormatBool(actual.value)
}
}
- if len(op.confirmedNodes) > 0 {
- fmt.Fprintf(cmd.OutOrStdout(), "\tConfirmed nodes:\n")
- for _, node := range op.confirmedNodes {
- fmt.Fprintf(cmd.OutOrStdout(), "\t\t- %s\n", hex.EncodeToString(node.PublicKey()))
- }
- }
- }
-
- if len(result.errors) == 0 {
- return
- }
- fmt.Fprintf(cmd.OutOrStdout(), "Errors:\n")
- for _, err := range result.errors {
- fmt.Fprintf(cmd.OutOrStdout(), "\t%s\n", err.Error())
+ fmt.Fprintf(w, "%s\t%s\t%s\t\n", nodeID, strconv.FormatBool(required), actualStr)
}
}
-
-func printObjectNodesAsJSON(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
- jsonResult := &objNodesResultJSON{
- ObjectID: objID.EncodeToString(),
- }
-
- for _, object := range objects {
- do := ObjNodesDataObject{
- ObjectID: object.objectID.EncodeToString(),
- }
- if object.ecHeader != nil {
- do.ECIndex = &object.ecHeader.index
- ecParent := object.ecHeader.parent.EncodeToString()
- do.ECParentObjectID = &ecParent
- }
- op, ok := result.placements[object.objectID]
- if !ok {
- continue
- }
- for _, rn := range op.requiredNodes {
- do.RequiredNodes = append(do.RequiredNodes, hex.EncodeToString(rn.PublicKey()))
- }
- for _, cn := range op.confirmedNodes {
- do.ConfirmedNodes = append(do.ConfirmedNodes, hex.EncodeToString(cn.PublicKey()))
- }
- jsonResult.DataObjects = append(jsonResult.DataObjects, do)
- }
- for _, err := range result.errors {
- jsonResult.Errors = append(jsonResult.Errors, err.Error())
- }
- b, err := json.Marshal(jsonResult)
- commonCmd.ExitOnErr(cmd, "failed to marshal json: %w", err)
- cmd.Println(string(b))
-}
diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go
deleted file mode 100644
index ebbde76a2..000000000
--- a/cmd/frostfs-cli/modules/object/patch.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package object
-
-import (
- "fmt"
- "os"
- "strconv"
- "strings"
-
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/spf13/cobra"
-)
-
-const (
- newAttrsFlagName = "new-attrs"
- replaceAttrsFlagName = "replace-attrs"
- rangeFlagName = "range"
- payloadFlagName = "payload"
- splitHeaderFlagName = "split-header"
-)
-
-var objectPatchCmd = &cobra.Command{
- Use: "patch",
- Run: patch,
- Short: "Patch FrostFS object",
- Long: "Patch FrostFS object. Each range passed to the command requires to pass a corresponding patch payload.",
- Example: `
-frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid --oid --new-attrs 'key1=val1,key2=val2' --replace-attrs
-frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid --oid --range offX:lnX --payload /path/to/payloadX --range offY:lnY --payload /path/to/payloadY
-frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid --oid --new-attrs 'key1=val1,key2=val2' --replace-attrs --range offX:lnX --payload /path/to/payload
-`,
-}
-
-func initObjectPatchCmd() {
- commonflags.Init(objectPatchCmd)
- initFlagSession(objectPatchCmd, "PATCH")
-
- flags := objectPatchCmd.Flags()
-
- flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
- _ = objectRangeCmd.MarkFlagRequired(commonflags.CIDFlag)
-
- flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
- _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
-
- flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
- flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
- flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
- flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
- flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header")
-}
-
-func patch(cmd *cobra.Command, _ []string) {
- var cnr cid.ID
- var obj oid.ID
-
- objAddr := ReadObjectAddress(cmd, &cnr, &obj)
-
- ranges, err := getRangeSlice(cmd)
- commonCmd.ExitOnErr(cmd, "", err)
-
- payloads := patchPayloadPaths(cmd)
-
- if len(ranges) != len(payloads) {
- commonCmd.ExitOnErr(cmd, "", fmt.Errorf("the number of ranges and payloads are not equal: ranges = %d, payloads = %d", len(ranges), len(payloads)))
- }
-
- newAttrs, err := parseNewObjectAttrs(cmd)
- commonCmd.ExitOnErr(cmd, "can't parse new object attributes: %w", err)
- replaceAttrs, _ := cmd.Flags().GetBool(replaceAttrsFlagName)
-
- pk := key.GetOrGenerate(cmd)
-
- cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
-
- var prm internalclient.PatchObjectPrm
- prm.SetClient(cli)
- Prepare(cmd, &prm)
- ReadOrOpenSession(cmd, &prm, pk, cnr, nil)
-
- prm.SetAddress(objAddr)
- prm.NewAttributes = newAttrs
- prm.ReplaceAttribute = replaceAttrs
-
- prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd)
-
- for i := range ranges {
- prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
- Range: ranges[i],
- PayloadPath: payloads[i],
- })
- }
-
- res, err := internalclient.Patch(cmd.Context(), prm)
- if err != nil {
- commonCmd.ExitOnErr(cmd, "can't patch the object: %w", err)
- }
- cmd.Println("Patched object ID: ", res.OID.EncodeToString())
-}
-
-func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
- rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
- if err != nil {
- return nil, err
- }
-
- attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
- for i := range rawAttrs {
- k, v, found := strings.Cut(rawAttrs[i], "=")
- if !found {
- return nil, fmt.Errorf("invalid attribute format: %s", rawAttrs[i])
- }
- attrs[i].SetKey(k)
- attrs[i].SetValue(v)
- }
- return attrs, nil
-}
-
-func getRangeSlice(cmd *cobra.Command) ([]objectSDK.Range, error) {
- v, _ := cmd.Flags().GetStringSlice(rangeFlagName)
- if len(v) == 0 {
- return []objectSDK.Range{}, nil
- }
- rs := make([]objectSDK.Range, len(v))
- for i := range v {
- before, after, found := strings.Cut(v[i], rangeSep)
- if !found {
- return nil, fmt.Errorf("invalid range specifier: %s", v[i])
- }
-
- offset, err := strconv.ParseUint(before, 10, 64)
- if err != nil {
- return nil, fmt.Errorf("invalid '%s' range offset specifier: %w", v[i], err)
- }
- length, err := strconv.ParseUint(after, 10, 64)
- if err != nil {
- return nil, fmt.Errorf("invalid '%s' range length specifier: %w", v[i], err)
- }
-
- rs[i].SetOffset(offset)
- rs[i].SetLength(length)
- }
- return rs, nil
-}
-
-func patchPayloadPaths(cmd *cobra.Command) []string {
- v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
- return v
-}
-
-func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader {
- path, _ := cmd.Flags().GetString(splitHeaderFlagName)
- if path == "" {
- return nil
- }
-
- data, err := os.ReadFile(path)
- commonCmd.ExitOnErr(cmd, "read file error: %w", err)
-
- splitHdrV2 := new(objectV2.SplitHeader)
- err = splitHdrV2.Unmarshal(data)
- if err != nil {
- err = splitHdrV2.UnmarshalJSON(data)
- commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err)
- }
-
- return objectSDK.NewSplitHeaderFromV2(splitHdrV2)
-}
diff --git a/cmd/frostfs-cli/modules/object/put.go b/cmd/frostfs-cli/modules/object/put.go
index 9e8a7cc6f..97bb12dbc 100644
--- a/cmd/frostfs-cli/modules/object/put.go
+++ b/cmd/frostfs-cli/modules/object/put.go
@@ -10,11 +10,11 @@ import (
"strings"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -50,7 +50,7 @@ func initObjectPutCmd() {
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
- flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
+ flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@@ -93,7 +93,7 @@ func putObject(cmd *cobra.Command, _ []string) {
attrs := getAllObjectAttributes(cmd)
obj.SetContainerID(cnr)
- obj.SetOwnerID(ownerID)
+ obj.SetOwnerID(&ownerID)
obj.SetAttributes(attrs...)
notificationInfo, err := parseObjectNotifications(cmd)
@@ -160,7 +160,7 @@ func readFilePayload(filename string, cmd *cobra.Command) (io.Reader, cid.ID, us
commonCmd.ExitOnErr(cmd, "can't unmarshal object from given file: %w", objTemp.Unmarshal(buf))
payloadReader := bytes.NewReader(objTemp.Payload())
cnr, _ := objTemp.ContainerID()
- ownerID := objTemp.OwnerID()
+ ownerID := *objTemp.OwnerID()
return payloadReader, cnr, ownerID
}
@@ -174,7 +174,7 @@ func setFilePayloadReader(cmd *cobra.Command, f *os.File, prm *internalclient.Pu
p := pb.New64(fi.Size())
p.Output = cmd.OutOrStdout()
prm.SetPayloadReader(p.NewProxyReader(f))
- prm.SetHeaderCallback(func() { p.Start() })
+ prm.SetHeaderCallback(func(o *objectSDK.Object) { p.Start() })
return p
}
@@ -182,7 +182,7 @@ func setBinaryPayloadReader(cmd *cobra.Command, obj *objectSDK.Object, prm *inte
p := pb.New(len(obj.Payload()))
p.Output = cmd.OutOrStdout()
prm.SetPayloadReader(p.NewProxyReader(payloadReader))
- prm.SetHeaderCallback(func() { p.Start() })
+ prm.SetHeaderCallback(func(o *objectSDK.Object) { p.Start() })
return p
}
@@ -214,9 +214,11 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
}
func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
- rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
- if err != nil {
- return nil, err
+ var rawAttrs []string
+
+ raw := cmd.Flag("attributes").Value.String()
+ if len(raw) != 0 {
+ rawAttrs = strings.Split(raw, ",")
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go
index 6ec508ae2..0eee7bdba 100644
--- a/cmd/frostfs-cli/modules/object/range.go
+++ b/cmd/frostfs-cli/modules/object/range.go
@@ -38,7 +38,7 @@ func initObjectRangeCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
- flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
+ flags.String("range", "", "Range to take data from in the form offset:length")
flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
flags.Bool(rawFlag, false, rawFlagDesc)
}
@@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
- objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+ objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@@ -146,59 +146,12 @@ func marshalSplitInfo(cmd *cobra.Command, info *objectSDK.SplitInfo) ([]byte, er
}
}
-func printECInfoErr(cmd *cobra.Command, err error) bool {
- var errECInfo *objectSDK.ECInfoError
-
- ok := errors.As(err, &errECInfo)
-
- if ok {
- toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
- toProto, _ := cmd.Flags().GetBool("proto")
- if !toJSON && !toProto {
- cmd.PrintErrln("Object is erasure-encoded, ec information received.")
- }
- printECInfo(cmd, errECInfo.ECInfo())
- }
-
- return ok
-}
-
-func printECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) {
- bs, err := marshalECInfo(cmd, info)
- commonCmd.ExitOnErr(cmd, "can't marshal split info: %w", err)
-
- cmd.Println(string(bs))
-}
-
-func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
- toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
- toProto, _ := cmd.Flags().GetBool("proto")
- switch {
- case toJSON && toProto:
- return nil, errors.New("'--json' and '--proto' flags are mutually exclusive")
- case toJSON:
- return info.MarshalJSON()
- case toProto:
- return info.Marshal()
- default:
- b := bytes.NewBuffer(nil)
- b.WriteString("Total chunks: " + strconv.Itoa(int(info.Chunks[0].Total)))
- for _, chunk := range info.Chunks {
- var id oid.ID
- if err := id.Decode(chunk.ID.GetValue()); err != nil {
- return nil, fmt.Errorf("unable to decode chunk id: %w", err)
- }
- b.WriteString("\n Index: " + strconv.Itoa(int(chunk.Index)) + " ID: " + id.String())
- }
- return b.Bytes(), nil
- }
-}
-
func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
- vs, err := cmd.Flags().GetStringSlice("range")
- if len(vs) == 0 || err != nil {
- return nil, err
+ v := cmd.Flag("range").Value.String()
+ if len(v) == 0 {
+ return nil, nil
}
+ vs := strings.Split(v, ",")
rs := make([]objectSDK.Range, len(vs))
for i := range vs {
before, after, found := strings.Cut(vs[i], rangeSep)
diff --git a/cmd/frostfs-cli/modules/object/root.go b/cmd/frostfs-cli/modules/object/root.go
index b808a509e..c46fc058a 100644
--- a/cmd/frostfs-cli/modules/object/root.go
+++ b/cmd/frostfs-cli/modules/object/root.go
@@ -1,6 +1,7 @@
package object
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"github.com/spf13/cobra"
)
@@ -10,12 +11,14 @@ var Cmd = &cobra.Command{
Use: "object",
Short: "Operations with Objects",
Long: `Operations with Objects`,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
// bind exactly that cmd's flags to
// the viper before execution
commonflags.Bind(cmd)
commonflags.BindAPI(cmd)
+ common.StartClientCommandSpan(cmd)
},
+ PersistentPostRun: common.StopClientCommandSpan,
}
func init() {
@@ -28,9 +31,7 @@ func init() {
objectHashCmd,
objectRangeCmd,
objectLockCmd,
- objectNodesCmd,
- objectPatchCmd,
- }
+ objectNodesCmd}
Cmd.AddCommand(objectChildCommands...)
@@ -40,7 +41,6 @@ func init() {
}
initObjectPutCmd()
- initObjectPatchCmd()
initObjectDeleteCmd()
initObjectGetCmd()
initObjectSearchCmd()
diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go
index 8e4e8b287..37e9f74e0 100644
--- a/cmd/frostfs-cli/modules/object/util.go
+++ b/cmd/frostfs-cli/modules/object/util.go
@@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string {
return xs
}
-func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
+func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
readCID(cmd, cnr)
readOID(cmd, obj)
@@ -262,8 +262,13 @@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client
if _, ok := dst.(*internal.DeleteObjectPrm); ok {
common.PrintVerbose(cmd, "Collecting relatives of the removal object...")
- objs = collectObjectRelatives(cmd, cli, cnr, *obj)
- objs = append(objs, *obj)
+ rels := collectObjectRelatives(cmd, cli, cnr, *obj)
+
+ if len(rels) == 0 {
+ objs = []oid.ID{*obj}
+ } else {
+ objs = append(rels, *obj)
+ }
}
}
@@ -301,8 +306,6 @@ func finalizeSession(cmd *cobra.Command, dst SessionPrm, tok *session.Object, ke
case *internal.PutObjectPrm:
common.PrintVerbose(cmd, "Binding session to object PUT...")
tok.ForVerb(session.VerbObjectPut)
- case *internal.PatchObjectPrm:
- tok.ForVerb(session.VerbObjectPatch)
case *internal.DeleteObjectPrm:
common.PrintVerbose(cmd, "Binding session to object DELETE...")
tok.ForVerb(session.VerbObjectDelete)
@@ -351,38 +354,31 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID,
Prepare(cmd, &prmHead)
- o, err := internal.HeadObject(cmd.Context(), prmHead)
+ _, err := internal.HeadObject(cmd.Context(), prmHead)
var errSplit *objectSDK.SplitInfoError
- var errEC *objectSDK.ECInfoError
switch {
default:
commonCmd.ExitOnErr(cmd, "failed to get raw object header: %w", err)
case err == nil:
common.PrintVerbose(cmd, "Raw header received - object is singular.")
- if ech := o.Header().ECHeader(); ech != nil {
- commonCmd.ExitOnErr(cmd, "Lock EC chunk failed: %w", errors.ErrUnsupported)
- }
return nil
case errors.As(err, &errSplit):
common.PrintVerbose(cmd, "Split information received - object is virtual.")
- splitInfo := errSplit.SplitInfo()
-
- if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr); ok {
- return members
- }
-
- if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnr); ok {
- return members
- }
-
- return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnr, obj)
- case errors.As(err, &errEC):
- common.PrintVerbose(cmd, "Object is erasure-coded.")
- return nil
}
- return nil
+
+ splitInfo := errSplit.SplitInfo()
+
+ if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr); ok {
+ return members
+ }
+
+ if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnr); ok {
+ return members
+ }
+
+ return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnr, obj)
}
func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) {
@@ -406,6 +402,7 @@ func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.
common.PrintVerbose(cmd, "Received split members from the linking object: %v", children)
+ // include linking object
return append(children, idLinking), true
}
diff --git a/cmd/frostfs-cli/modules/root.go b/cmd/frostfs-cli/modules/root.go
index 88acab341..808bd6d07 100644
--- a/cmd/frostfs-cli/modules/root.go
+++ b/cmd/frostfs-cli/modules/root.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
accountingCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/acl"
- apemanager "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/ape_manager"
bearerCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/bearer"
containerCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/container"
controlCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/control"
@@ -21,6 +20,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc"
+ "github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -46,10 +46,6 @@ of frostfs-api and some useful utilities for compiling ACL rules from JSON
notation, managing container access through protocol gates, querying network map
and much more!`,
Run: entryPoint,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
- common.StartClientCommandSpan(cmd)
- },
- PersistentPostRun: common.StopClientCommandSpan,
}
// Execute adds all child commands to the root command and sets flags appropriately.
@@ -61,7 +57,6 @@ func Execute() {
func init() {
cobra.OnInitialize(initConfig)
- cobra.EnableTraverseRunHooks = true
// use stdout as default output for cmd.Print()
rootCmd.SetOut(os.Stdout)
@@ -81,7 +76,6 @@ func init() {
rootCmd.Flags().Bool("version", false, "Application version and FrostFS API compatibility")
rootCmd.AddCommand(acl.Cmd)
- rootCmd.AddCommand(apemanager.Cmd)
rootCmd.AddCommand(bearerCli.Cmd)
rootCmd.AddCommand(sessionCli.Cmd)
rootCmd.AddCommand(accountingCli.Cmd)
@@ -111,16 +105,14 @@ func initConfig() {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
- // Find config directory.
- configDir, err := os.UserConfigDir()
- if err != nil {
- common.PrintVerbose(rootCmd, "Get config dir: %s", err)
- } else {
- // Search config in `$XDG_CONFIG_HOME/frostfs-cli/` with name "config.yaml"
- viper.AddConfigPath(filepath.Join(configDir, "frostfs-cli"))
- viper.SetConfigName("config")
- viper.SetConfigType("yaml")
- }
+ // Find home directory.
+ home, err := homedir.Dir()
+ commonCmd.ExitOnErr(rootCmd, "", err)
+
+ // Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml"
+ viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli"))
+ viper.SetConfigName("config")
+ viper.SetConfigType("yaml")
}
viper.SetEnvPrefix(envPrefix)
diff --git a/cmd/frostfs-cli/modules/session/create.go b/cmd/frostfs-cli/modules/session/create.go
index e13200a5d..f9705c023 100644
--- a/cmd/frostfs-cli/modules/session/create.go
+++ b/cmd/frostfs-cli/modules/session/create.go
@@ -6,6 +6,7 @@ import (
"os"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
@@ -29,10 +30,12 @@ var createCmd = &cobra.Command{
Use: "create",
Short: "Create session token",
Run: createSession,
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
_ = viper.BindPFlag(commonflags.Account, cmd.Flags().Lookup(commonflags.Account))
+ common.StartClientCommandSpan(cmd)
},
+ PersistentPostRun: common.StopClientCommandSpan,
}
func init() {
@@ -78,7 +81,7 @@ func createSession(cmd *cobra.Command, _ []string) {
}
filename, _ := cmd.Flags().GetString(outFlag)
- err = os.WriteFile(filename, data, 0o644)
+ err = os.WriteFile(filename, data, 0644)
commonCmd.ExitOnErr(cmd, "can't write token to file: %w", err)
}
diff --git a/cmd/frostfs-cli/modules/tree/add.go b/cmd/frostfs-cli/modules/tree/add.go
index e2c05d486..4ac1ed198 100644
--- a/cmd/frostfs-cli/modules/tree/add.go
+++ b/cmd/frostfs-cli/modules/tree/add.go
@@ -30,6 +30,8 @@ func initAddCmd() {
ff := addCmd.Flags()
ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2")
ff.Uint64(parentIDFlagKey, 0, "Parent node ID")
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func add(cmd *cobra.Command, _ []string) {
@@ -45,10 +47,9 @@ func add(cmd *cobra.Command, _ []string) {
meta, err := parseMeta(cmd)
commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err)
- ctx, cancel := contextWithTimeout(cmd)
- defer cancel()
+ ctx := cmd.Context()
- cli, err := _client()
+ cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
@@ -71,18 +72,18 @@ func add(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
resp, err := cli.Add(ctx, req)
- commonCmd.ExitOnErr(cmd, "failed to call add: %w", err)
+ commonCmd.ExitOnErr(cmd, "failed to cal add: %w", err)
- cmd.Println("Node ID: ", resp.GetBody().GetNodeId())
+ cmd.Println("Node ID: ", resp.Body.NodeId)
}
-func parseMeta(cmd *cobra.Command) ([]tree.KeyValue, error) {
+func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) {
raws, _ := cmd.Flags().GetStringSlice(metaFlagKey)
if len(raws) == 0 {
return nil, nil
}
- pairs := make([]tree.KeyValue, 0, len(raws))
+ pairs := make([]*tree.KeyValue, 0, len(raws))
for i := range raws {
k, v, found := strings.Cut(raws[i], "=")
if !found {
@@ -93,7 +94,7 @@ func parseMeta(cmd *cobra.Command) ([]tree.KeyValue, error) {
pair.Key = k
pair.Value = []byte(v)
- pairs = append(pairs, pair)
+ pairs = append(pairs, &pair)
}
return pairs, nil
diff --git a/cmd/frostfs-cli/modules/tree/add_by_path.go b/cmd/frostfs-cli/modules/tree/add_by_path.go
index 7263bcd0d..ea815dbfe 100644
--- a/cmd/frostfs-cli/modules/tree/add_by_path.go
+++ b/cmd/frostfs-cli/modules/tree/add_by_path.go
@@ -36,6 +36,7 @@ func initAddByPathCmd() {
ff.String(pathFlagKey, "", "Path to a node")
ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2")
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
_ = cobra.MarkFlagRequired(ff, pathFlagKey)
}
@@ -49,10 +50,9 @@ func addByPath(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
- ctx, cancel := contextWithTimeout(cmd)
- defer cancel()
+ ctx := cmd.Context()
- cli, err := _client()
+ cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go
index d71a94b98..4f4f54657 100644
--- a/cmd/frostfs-cli/modules/tree/client.go
+++ b/cmd/frostfs-cli/modules/tree/client.go
@@ -2,67 +2,50 @@ package tree
import (
"context"
- "crypto/tls"
- "fmt"
+ "strings"
+ "time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+ metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
// _client returns grpc Tree service client. Should be removed
// after making Tree API public.
-func _client() (tree.TreeServiceClient, error) {
+func _client(ctx context.Context) (tree.TreeServiceClient, error) {
var netAddr network.Address
-
- rpcEndpoint := viper.GetString(commonflags.RPC)
- if rpcEndpoint == "" {
- return nil, fmt.Errorf("%s is not defined", commonflags.RPC)
- }
-
- err := netAddr.FromString(rpcEndpoint)
+ err := netAddr.FromString(viper.GetString(commonflags.RPC))
if err != nil {
return nil, err
}
- host, isTLS, err := client.ParseURI(netAddr.URIAddr())
- if err != nil {
- return nil, err
- }
-
- creds := insecure.NewCredentials()
- if isTLS {
- creds = credentials.NewTLS(&tls.Config{})
- }
-
opts := []grpc.DialOption{
+ grpc.WithBlock(),
grpc.WithChainUnaryInterceptor(
- tracing.NewUnaryClientInterceptor(),
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
+ metrics.NewStreamClientInterceptor(),
tracing.NewStreamClientInterceptor(),
),
- grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
- grpc.WithDisableServiceConfig(),
- grpc.WithTransportCredentials(creds),
}
- cc, err := grpc.NewClient(host, opts...)
+ if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
+ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ }
+
+ // a default connection establishing timeout
+ const defaultClientConnectTimeout = time.Second * 2
+
+ ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
+ cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...)
+ cancel()
+
return tree.NewTreeServiceClient(cc), err
}
-
-func contextWithTimeout(cmd *cobra.Command) (context.Context, context.CancelFunc) {
- if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
- common.PrintVerbose(cmd, "Set request timeout to %s.", timeout)
- return context.WithTimeout(cmd.Context(), timeout)
- }
- return context.WithTimeout(cmd.Context(), commonflags.TimeoutDefault)
-}
diff --git a/cmd/frostfs-cli/modules/tree/get_by_path.go b/cmd/frostfs-cli/modules/tree/get_by_path.go
index 210630e60..f239066cd 100644
--- a/cmd/frostfs-cli/modules/tree/get_by_path.go
+++ b/cmd/frostfs-cli/modules/tree/get_by_path.go
@@ -36,6 +36,8 @@ func initGetByPathCmd() {
ff.String(pathFlagKey, "", "Path to a node")
ff.Bool(latestOnlyFlagKey, false, "Look only for the latest version of a node")
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getByPath(cmd *cobra.Command, _ []string) {
@@ -48,10 +50,9 @@ func getByPath(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
- ctx, cancel := contextWithTimeout(cmd)
- defer cancel()
+ ctx := cmd.Context()
- cli, err := _client()
+ cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/get_op_log.go b/cmd/frostfs-cli/modules/tree/get_op_log.go
index 9d767ab3e..b1b307f62 100644
--- a/cmd/frostfs-cli/modules/tree/get_op_log.go
+++ b/cmd/frostfs-cli/modules/tree/get_op_log.go
@@ -30,6 +30,8 @@ func initGetOpLogCmd() {
ff := getOpLogCmd.Flags()
ff.Uint64(heightFlagKey, 0, "Height to start with")
ff.Uint64(countFlagKey, 10, "Logged operations count")
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getOpLog(cmd *cobra.Command, _ []string) {
@@ -42,10 +44,9 @@ func getOpLog(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
- ctx, cancel := contextWithTimeout(cmd)
- defer cancel()
+ ctx := cmd.Context()
- cli, err := _client()
+ cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/healthcheck.go b/cmd/frostfs-cli/modules/tree/healthcheck.go
index c581b8e26..f0506467e 100644
--- a/cmd/frostfs-cli/modules/tree/healthcheck.go
+++ b/cmd/frostfs-cli/modules/tree/healthcheck.go
@@ -20,14 +20,15 @@ var healthcheckCmd = &cobra.Command{
func initHealthcheckCmd() {
commonflags.Init(healthcheckCmd)
+ ff := healthcheckCmd.Flags()
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func healthcheck(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
- ctx, cancel := contextWithTimeout(cmd)
- defer cancel()
+ ctx := cmd.Context()
- cli, err := _client()
+ cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
req := &tree.HealthcheckRequest{
diff --git a/cmd/frostfs-cli/modules/tree/list.go b/cmd/frostfs-cli/modules/tree/list.go
index ee1db2a79..a25d066d5 100644
--- a/cmd/frostfs-cli/modules/tree/list.go
+++ b/cmd/frostfs-cli/modules/tree/list.go
@@ -26,6 +26,8 @@ func initListCmd() {
ff := listCmd.Flags()
ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = listCmd.MarkFlagRequired(commonflags.CIDFlag)
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func list(cmd *cobra.Command, _ []string) {
@@ -36,10 +38,9 @@ func list(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx, cancel := contextWithTimeout(cmd)
- defer cancel()
+ ctx := cmd.Context()
- cli, err := _client()
+ cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/move.go b/cmd/frostfs-cli/modules/tree/move.go
index 7a369bd02..84b2fb80e 100644
--- a/cmd/frostfs-cli/modules/tree/move.go
+++ b/cmd/frostfs-cli/modules/tree/move.go
@@ -33,6 +33,8 @@ func initMoveCmd() {
_ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey)
_ = getSubtreeCmd.MarkFlagRequired(parentIDFlagKey)
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func move(cmd *cobra.Command, _ []string) {
@@ -43,10 +45,9 @@ func move(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx, cancel := contextWithTimeout(cmd)
- defer cancel()
+ ctx := cmd.Context()
- cli, err := _client()
+ cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
@@ -65,7 +66,7 @@ func move(cmd *cobra.Command, _ []string) {
Body: &tree.GetSubTreeRequest_Body{
ContainerId: rawCID,
TreeId: tid,
- RootId: []uint64{nid},
+ RootId: nid,
Depth: 1,
BearerToken: bt,
},
@@ -74,7 +75,7 @@ func move(cmd *cobra.Command, _ []string) {
resp, err := cli.GetSubTree(ctx, subTreeReq)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
- var meta []tree.KeyValue
+ var meta []*tree.KeyValue
subtreeResp, err := resp.Recv()
for ; err == nil; subtreeResp, err = resp.Recv() {
meta = subtreeResp.GetBody().GetMeta()
diff --git a/cmd/frostfs-cli/modules/tree/remove.go b/cmd/frostfs-cli/modules/tree/remove.go
index 3c532fe26..74e9d9749 100644
--- a/cmd/frostfs-cli/modules/tree/remove.go
+++ b/cmd/frostfs-cli/modules/tree/remove.go
@@ -29,6 +29,8 @@ func initRemoveCmd() {
ff.Uint64(nodeIDFlagKey, 0, "Node ID.")
_ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey)
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func remove(cmd *cobra.Command, _ []string) {
@@ -39,10 +41,9 @@ func remove(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx, cancel := contextWithTimeout(cmd)
- defer cancel()
+ ctx := cmd.Context()
- cli, err := _client()
+ cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/root.go b/cmd/frostfs-cli/modules/tree/root.go
index 5a53c50d6..701a78f2a 100644
--- a/cmd/frostfs-cli/modules/tree/root.go
+++ b/cmd/frostfs-cli/modules/tree/root.go
@@ -40,7 +40,8 @@ const (
metaFlagKey = "meta"
- pathFlagKey = "path"
+ pathFlagKey = "path"
+ pathAttributeFlagKey = "pattr"
latestOnlyFlagKey = "latest"
@@ -49,7 +50,6 @@ const (
heightFlagKey = "height"
countFlagKey = "count"
depthFlagKey = "depth"
- orderFlagKey = "ordered"
)
func initCTID(cmd *cobra.Command) {
diff --git a/cmd/frostfs-cli/modules/tree/subtree.go b/cmd/frostfs-cli/modules/tree/subtree.go
index c5f7ad401..64cb351ec 100644
--- a/cmd/frostfs-cli/modules/tree/subtree.go
+++ b/cmd/frostfs-cli/modules/tree/subtree.go
@@ -30,10 +30,11 @@ func initGetSubtreeCmd() {
ff := getSubtreeCmd.Flags()
ff.Uint64(rootIDFlagKey, 0, "Root ID to traverse from.")
ff.Uint32(depthFlagKey, 10, "Traversal depth.")
- ff.Bool(orderFlagKey, false, "Sort output by ascending FileName.")
_ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag)
_ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey)
+
+ _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getSubTree(cmd *cobra.Command, _ []string) {
@@ -44,10 +45,9 @@ func getSubTree(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx, cancel := contextWithTimeout(cmd)
- defer cancel()
+ ctx := cmd.Context()
- cli, err := _client()
+ cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
@@ -59,13 +59,6 @@ func getSubTree(cmd *cobra.Command, _ []string) {
depth, _ := cmd.Flags().GetUint32(depthFlagKey)
- order, _ := cmd.Flags().GetBool(orderFlagKey)
-
- bodyOrder := tree.GetSubTreeRequest_Body_Order_None
- if order {
- bodyOrder = tree.GetSubTreeRequest_Body_Order_Asc
- }
-
var bt []byte
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
bt = t.Marshal()
@@ -75,12 +68,9 @@ func getSubTree(cmd *cobra.Command, _ []string) {
Body: &tree.GetSubTreeRequest_Body{
ContainerId: rawCID,
TreeId: tid,
- RootId: []uint64{rid},
+ RootId: rid,
Depth: depth,
BearerToken: bt,
- OrderBy: &tree.GetSubTreeRequest_Body_Order{
- Direction: bodyOrder,
- },
},
}
@@ -93,15 +83,10 @@ func getSubTree(cmd *cobra.Command, _ []string) {
for ; err == nil; subtreeResp, err = resp.Recv() {
b := subtreeResp.GetBody()
- if len(b.GetNodeId()) == 1 {
- cmd.Printf("Node ID: %d\n", b.GetNodeId())
- cmd.Println("\tParent ID: ", b.GetParentId())
- cmd.Println("\tTimestamp: ", b.GetTimestamp())
- } else {
- cmd.Printf("Node IDs: %v\n", b.GetNodeId())
- cmd.Println("\tParent IDs: ", b.GetParentId())
- cmd.Println("\tTimestamps: ", b.GetTimestamp())
- }
+ cmd.Printf("Node ID: %d\n", b.GetNodeId())
+
+ cmd.Println("\tParent ID: ", b.GetParentId())
+ cmd.Println("\tTimestamp: ", b.GetTimestamp())
if meta := b.GetMeta(); len(meta) > 0 {
cmd.Println("\tMeta pairs: ")
diff --git a/cmd/frostfs-cli/modules/util/acl.go b/cmd/frostfs-cli/modules/util/acl.go
index 145dcc756..4c2e324b3 100644
--- a/cmd/frostfs-cli/modules/util/acl.go
+++ b/cmd/frostfs-cli/modules/util/acl.go
@@ -33,7 +33,7 @@ func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) {
fmt.Fprintln(w, strings.Join(bits, "\t"))
// Footer
footer := []string{"X F"}
- for range 7 {
+ for i := 0; i < 7; i++ {
footer = append(footer, "U S O B")
}
fmt.Fprintln(w, strings.Join(footer, "\t"))
diff --git a/cmd/frostfs-cli/modules/util/convert_eacl.go b/cmd/frostfs-cli/modules/util/convert_eacl.go
index caa6dfcfe..1fb76c50e 100644
--- a/cmd/frostfs-cli/modules/util/convert_eacl.go
+++ b/cmd/frostfs-cli/modules/util/convert_eacl.go
@@ -6,17 +6,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
-const (
- fromFlagStr = "from"
- toFlagStr = "to"
- apeFlagStr = "ape"
-)
-
var convertEACLCmd = &cobra.Command{
Use: "eacl",
Short: "Convert representation of extended ACL table",
@@ -26,35 +18,24 @@ var convertEACLCmd = &cobra.Command{
func initConvertEACLCmd() {
flags := convertEACLCmd.Flags()
- flags.String(fromFlagStr, "", "File with JSON or binary encoded extended ACL table")
- _ = convertEACLCmd.MarkFlagFilename(fromFlagStr)
- _ = convertEACLCmd.MarkFlagRequired(fromFlagStr)
+ flags.String("from", "", "File with JSON or binary encoded extended ACL table")
+ _ = convertEACLCmd.MarkFlagFilename("from")
+ _ = convertEACLCmd.MarkFlagRequired("from")
- flags.String(toFlagStr, "", "File to dump extended ACL table (default: binary encoded)")
+ flags.String("to", "", "File to dump extended ACL table (default: binary encoded)")
flags.Bool(commonflags.JSON, false, "Dump extended ACL table in JSON encoding")
-
- flags.Bool(apeFlagStr, false, "Dump converted eACL table to APE chain format")
-
- convertEACLCmd.MarkFlagsMutuallyExclusive(apeFlagStr, commonflags.JSON)
}
func convertEACLTable(cmd *cobra.Command, _ []string) {
- pathFrom := cmd.Flag(fromFlagStr).Value.String()
- to := cmd.Flag(toFlagStr).Value.String()
+ pathFrom := cmd.Flag("from").Value.String()
+ to := cmd.Flag("to").Value.String()
jsonFlag, _ := cmd.Flags().GetBool(commonflags.JSON)
- apeFlag, _ := cmd.Flags().GetBool(apeFlagStr)
table := common.ReadEACL(cmd, pathFrom)
var data []byte
var err error
-
- if apeFlag {
- var ch *chain.Chain
- ch, err = apeutil.ConvertEACLToAPE(table)
- commonCmd.ExitOnErr(cmd, "convert eACL table to APE chain error: %w", err)
- data = ch.Bytes()
- } else if jsonFlag || len(to) == 0 {
+ if jsonFlag || len(to) == 0 {
data, err = table.MarshalJSON()
commonCmd.ExitOnErr(cmd, "can't JSON encode extended ACL table: %w", err)
} else {
@@ -67,7 +48,7 @@ func convertEACLTable(cmd *cobra.Command, _ []string) {
return
}
- err = os.WriteFile(to, data, 0o644)
+ err = os.WriteFile(to, data, 0644)
commonCmd.ExitOnErr(cmd, "can't write exteded ACL table to file: %w", err)
cmd.Printf("extended ACL table was successfully dumped to %s\n", to)
diff --git a/cmd/frostfs-cli/modules/util/keyer.go b/cmd/frostfs-cli/modules/util/keyer.go
index ee2497348..53082a15e 100644
--- a/cmd/frostfs-cli/modules/util/keyer.go
+++ b/cmd/frostfs-cli/modules/util/keyer.go
@@ -78,7 +78,7 @@ func keyerGenerate(filename string, d *keyer.Dashboard) error {
}
if filename != "" {
- return os.WriteFile(filename, key, 0o600)
+ return os.WriteFile(filename, key, 0600)
}
return nil
diff --git a/cmd/frostfs-cli/modules/util/locode.go b/cmd/frostfs-cli/modules/util/locode.go
new file mode 100644
index 000000000..a1f0f4d3f
--- /dev/null
+++ b/cmd/frostfs-cli/modules/util/locode.go
@@ -0,0 +1,18 @@
+package util
+
+import (
+ "github.com/spf13/cobra"
+)
+
+// locode section.
+var locodeCmd = &cobra.Command{
+ Use: "locode",
+ Short: "Working with FrostFS UN/LOCODE database",
+}
+
+func initLocodeCmd() {
+ locodeCmd.AddCommand(locodeGenerateCmd, locodeInfoCmd)
+
+ initUtilLocodeInfoCmd()
+ initUtilLocodeGenerateCmd()
+}
diff --git a/cmd/frostfs-cli/modules/util/locode_generate.go b/cmd/frostfs-cli/modules/util/locode_generate.go
new file mode 100644
index 000000000..319dee1c6
--- /dev/null
+++ b/cmd/frostfs-cli/modules/util/locode_generate.go
@@ -0,0 +1,96 @@
+package util
+
+import (
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
+ airportsdb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/airports"
+ locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb"
+ continentsdb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/continents/geojson"
+ csvlocode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/table/csv"
+ "github.com/spf13/cobra"
+)
+
+type namesDB struct {
+ *airportsdb.DB
+ *csvlocode.Table
+}
+
+const (
+ locodeGenerateInputFlag = "in"
+ locodeGenerateSubDivFlag = "subdiv"
+ locodeGenerateAirportsFlag = "airports"
+ locodeGenerateCountriesFlag = "countries"
+ locodeGenerateContinentsFlag = "continents"
+ locodeGenerateOutputFlag = "out"
+)
+
+var (
+ locodeGenerateInPaths []string
+ locodeGenerateSubDivPath string
+ locodeGenerateAirportsPath string
+ locodeGenerateCountriesPath string
+ locodeGenerateContinentsPath string
+ locodeGenerateOutPath string
+
+ locodeGenerateCmd = &cobra.Command{
+ Use: "generate",
+ Short: "Generate UN/LOCODE database for FrostFS",
+ Run: func(cmd *cobra.Command, _ []string) {
+ locodeDB := csvlocode.New(
+ csvlocode.Prm{
+ Path: locodeGenerateInPaths[0],
+ SubDivPath: locodeGenerateSubDivPath,
+ },
+ csvlocode.WithExtraPaths(locodeGenerateInPaths[1:]...),
+ )
+
+ airportDB := airportsdb.New(airportsdb.Prm{
+ AirportsPath: locodeGenerateAirportsPath,
+ CountriesPath: locodeGenerateCountriesPath,
+ })
+
+ continentsDB := continentsdb.New(continentsdb.Prm{
+ Path: locodeGenerateContinentsPath,
+ })
+
+ targetDB := locodebolt.New(locodebolt.Prm{
+ Path: locodeGenerateOutPath,
+ })
+
+ err := targetDB.Open()
+ commonCmd.ExitOnErr(cmd, "", err)
+
+ defer targetDB.Close()
+
+ names := &namesDB{
+ DB: airportDB,
+ Table: locodeDB,
+ }
+
+ err = locodedb.FillDatabase(locodeDB, airportDB, continentsDB, names, targetDB)
+ commonCmd.ExitOnErr(cmd, "", err)
+ },
+ }
+)
+
+func initUtilLocodeGenerateCmd() {
+ flags := locodeGenerateCmd.Flags()
+
+ flags.StringSliceVar(&locodeGenerateInPaths, locodeGenerateInputFlag, nil, "List of paths to UN/LOCODE tables (csv)")
+ _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateInputFlag)
+
+ flags.StringVar(&locodeGenerateSubDivPath, locodeGenerateSubDivFlag, "", "Path to UN/LOCODE subdivision database (csv)")
+ _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateSubDivFlag)
+
+ flags.StringVar(&locodeGenerateAirportsPath, locodeGenerateAirportsFlag, "", "Path to OpenFlights airport database (csv)")
+ _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateAirportsFlag)
+
+ flags.StringVar(&locodeGenerateCountriesPath, locodeGenerateCountriesFlag, "", "Path to OpenFlights country database (csv)")
+ _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateCountriesFlag)
+
+ flags.StringVar(&locodeGenerateContinentsPath, locodeGenerateContinentsFlag, "", "Path to continent polygons (GeoJSON)")
+ _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateContinentsFlag)
+
+ flags.StringVar(&locodeGenerateOutPath, locodeGenerateOutputFlag, "", "Target path for generated database")
+ _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateOutputFlag)
+}
diff --git a/cmd/frostfs-cli/modules/util/locode_info.go b/cmd/frostfs-cli/modules/util/locode_info.go
new file mode 100644
index 000000000..e89252dea
--- /dev/null
+++ b/cmd/frostfs-cli/modules/util/locode_info.go
@@ -0,0 +1,56 @@
+package util
+
+import (
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
+ locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb"
+ "github.com/spf13/cobra"
+)
+
+const (
+ locodeInfoDBFlag = "db"
+ locodeInfoCodeFlag = "locode"
+)
+
+var (
+ locodeInfoDBPath string
+ locodeInfoCode string
+
+ locodeInfoCmd = &cobra.Command{
+ Use: "info",
+ Short: "Print information about UN/LOCODE from FrostFS database",
+ Run: func(cmd *cobra.Command, _ []string) {
+ targetDB := locodebolt.New(locodebolt.Prm{
+ Path: locodeInfoDBPath,
+ }, locodebolt.ReadOnly())
+
+ err := targetDB.Open()
+ commonCmd.ExitOnErr(cmd, "", err)
+
+ defer targetDB.Close()
+
+ record, err := locodedb.LocodeRecord(targetDB, locodeInfoCode)
+ commonCmd.ExitOnErr(cmd, "", err)
+
+ cmd.Printf("Country: %s\n", record.CountryName())
+ cmd.Printf("Location: %s\n", record.LocationName())
+ cmd.Printf("Continent: %s\n", record.Continent())
+ if subDivCode := record.SubDivCode(); subDivCode != "" {
+ cmd.Printf("Subdivision: [%s] %s\n", subDivCode, record.SubDivName())
+ }
+
+ geoPoint := record.GeoPoint()
+ cmd.Printf("Coordinates: %0.2f, %0.2f\n", geoPoint.Latitude(), geoPoint.Longitude())
+ },
+ }
+)
+
+func initUtilLocodeInfoCmd() {
+ flags := locodeInfoCmd.Flags()
+
+ flags.StringVar(&locodeInfoDBPath, locodeInfoDBFlag, "", "Path to FrostFS UN/LOCODE database")
+ _ = locodeInfoCmd.MarkFlagRequired(locodeInfoDBFlag)
+
+ flags.StringVar(&locodeInfoCode, locodeInfoCodeFlag, "", "UN/LOCODE")
+ _ = locodeInfoCmd.MarkFlagRequired(locodeInfoCodeFlag)
+}
diff --git a/cmd/frostfs-cli/modules/util/root.go b/cmd/frostfs-cli/modules/util/root.go
index a909e6899..3cdd236d4 100644
--- a/cmd/frostfs-cli/modules/util/root.go
+++ b/cmd/frostfs-cli/modules/util/root.go
@@ -9,7 +9,7 @@ import (
var Cmd = &cobra.Command{
Use: "util",
Short: "Utility operations",
- PersistentPreRun: func(cmd *cobra.Command, _ []string) {
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
flags := cmd.Flags()
_ = viper.BindPFlag(commonflags.GenerateKey, flags.Lookup(commonflags.GenerateKey))
@@ -23,9 +23,11 @@ func init() {
signCmd,
convertCmd,
keyerCmd,
+ locodeCmd,
)
initSignCmd()
initConvertCmd()
initKeyerCmd()
+ initLocodeCmd()
}
diff --git a/cmd/frostfs-cli/modules/util/sign_bearer.go b/cmd/frostfs-cli/modules/util/sign_bearer.go
index 991216958..c641cf0ac 100644
--- a/cmd/frostfs-cli/modules/util/sign_bearer.go
+++ b/cmd/frostfs-cli/modules/util/sign_bearer.go
@@ -56,7 +56,7 @@ func signBearerToken(cmd *cobra.Command, _ []string) {
return
}
- err = os.WriteFile(to, data, 0o644)
+ err = os.WriteFile(to, data, 0644)
commonCmd.ExitOnErr(cmd, "can't write signed bearer token to file: %w", err)
cmd.Printf("signed bearer token was successfully dumped to %s\n", to)
diff --git a/cmd/frostfs-cli/modules/util/sign_session.go b/cmd/frostfs-cli/modules/util/sign_session.go
index ba76678dc..2c77ee4df 100644
--- a/cmd/frostfs-cli/modules/util/sign_session.go
+++ b/cmd/frostfs-cli/modules/util/sign_session.go
@@ -76,7 +76,7 @@ func signSessionToken(cmd *cobra.Command, _ []string) {
return
}
- err = os.WriteFile(to, data, 0o644)
+ err = os.WriteFile(to, data, 0644)
if err != nil {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't write signed session token to %s: %w", to, err))
}
diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go
index 13a747ba6..54c7d18e3 100644
--- a/cmd/frostfs-ir/config.go
+++ b/cmd/frostfs-ir/config.go
@@ -1,24 +1,19 @@
package main
import (
- "context"
"os"
"os/signal"
- "strconv"
"syscall"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/spf13/cast"
"github.com/spf13/viper"
"go.uber.org/zap"
)
func newConfig() (*viper.Viper, error) {
var err error
- dv := viper.New()
+ var dv = viper.New()
defaultConfiguration(dv)
@@ -39,92 +34,46 @@ func reloadConfig() error {
if err != nil {
return err
}
- cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
- audit.Store(cfg.GetBool("audit.enabled"))
- var logPrm logger.Prm
err = logPrm.SetLevelString(cfg.GetString("logger.level"))
if err != nil {
return err
}
- err = logPrm.SetTags(loggerTags())
- if err != nil {
- return err
- }
- logger.UpdateLevelForTags(logPrm)
-
- return nil
+ return logPrm.Reload()
}
-func loggerTags() [][]string {
- var res [][]string
- for i := 0; ; i++ {
- var item []string
- index := strconv.FormatInt(int64(i), 10)
- names := cast.ToString(cfg.Get("logger.tags." + index + ".names"))
- if names == "" {
- break
- }
- item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level")))
- res = append(res, item)
- }
- return res
-}
-
-func watchForSignal(ctx context.Context, cancel func()) {
+func watchForSignal(cancel func()) {
ch := make(chan os.Signal, 1)
- signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
-
- sighupCh := make(chan os.Signal, 1)
- signal.Notify(sighupCh, syscall.SIGHUP)
+ signal.Notify(ch, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
for {
select {
- // signals causing application to shut down should have priority over
- // reconfiguration signal
- case <-ch:
- log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ case err := <-intErr:
+ log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
cancel()
- shutdown(ctx)
- log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+ shutdown()
return
- case err := <-intErr: // internal application error
- log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
- cancel()
- shutdown(ctx)
- return
- default:
- // block until any signal is receieved
- select {
- case <-ch:
- log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
- cancel()
- shutdown(ctx)
- log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
- return
- case err := <-intErr: // internal application error
- log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
- cancel()
- shutdown(ctx)
- return
- case <-sighupCh:
- log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
- if !innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
- log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
- break
- }
+ case sig := <-ch:
+ switch sig {
+ case syscall.SIGHUP:
+ log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
err := reloadConfig()
if err != nil {
- log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
+ log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
- pprofCmp.reload(ctx)
- metricsCmp.reload(ctx)
- log.Info(ctx, logs.FrostFSIRReloadExtraWallets)
+ pprofCmp.reload()
+ metricsCmp.reload()
+ log.Info(logs.FrostFSIRReloadExtraWallets)
err = innerRing.SetExtraWallets(cfg)
if err != nil {
- log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
+ log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
- innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
- log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+ log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+ case syscall.SIGTERM, syscall.SIGINT:
+ log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ cancel()
+ shutdown()
+ log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+ return
}
}
}
diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go
index 9b775252f..a7fe8d563 100644
--- a/cmd/frostfs-ir/defaults.go
+++ b/cmd/frostfs-ir/defaults.go
@@ -8,8 +8,6 @@ import (
func defaultConfiguration(cfg *viper.Viper) {
cfg.SetDefault("logger.level", "info")
- cfg.SetDefault("logger.destination", "stdout")
- cfg.SetDefault("logger.timestamp", false)
setPprofDefaults(cfg)
@@ -44,12 +42,6 @@ func defaultConfiguration(cfg *viper.Viper) {
setControlDefaults(cfg)
cfg.SetDefault("governance.disable", false)
-
- cfg.SetDefault("node.kludge_compatibility_mode", false)
-
- cfg.SetDefault("audit.enabled", false)
-
- setMultinetDefaults(cfg)
}
func setControlDefaults(cfg *viper.Viper) {
@@ -133,11 +125,3 @@ func setMorphDefaults(cfg *viper.Viper) {
cfg.SetDefault("morph.validators", []string{})
cfg.SetDefault("morph.switch_interval", 2*time.Minute)
}
-
-func setMultinetDefaults(cfg *viper.Viper) {
- cfg.SetDefault("multinet.enabled", false)
- cfg.SetDefault("multinet.balancer", "")
- cfg.SetDefault("multinet.restrict", false)
- cfg.SetDefault("multinet.fallback_delay", "0s")
- cfg.SetDefault("multinet.subnets", "")
-}
diff --git a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go
index dd70fc91c..3a6d77d84 100644
--- a/cmd/frostfs-ir/httpcomponent.go
+++ b/cmd/frostfs-ir/httpcomponent.go
@@ -1,7 +1,7 @@
package main
import (
- "context"
+ "fmt"
"net/http"
"time"
@@ -25,8 +25,8 @@ const (
shutdownTimeoutKeyPostfix = ".shutdown_timeout"
)
-func (c *httpComponent) init(ctx context.Context) {
- log.Info(ctx, "init "+c.name)
+func (c *httpComponent) init() {
+ log.Info(fmt.Sprintf("init %s", c.name))
c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
c.address = cfg.GetString(c.name + addressKeyPostfix)
c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
@@ -40,14 +40,14 @@ func (c *httpComponent) init(ctx context.Context) {
httputil.WithShutdownTimeout(c.shutdownDur),
)
} else {
- log.Info(ctx, c.name+" is disabled, skip")
+ log.Info(fmt.Sprintf("%s is disabled, skip", c.name))
c.srv = nil
}
}
-func (c *httpComponent) start(ctx context.Context) {
+func (c *httpComponent) start() {
if c.srv != nil {
- log.Info(ctx, "start "+c.name)
+ log.Info(fmt.Sprintf("start %s", c.name))
wg.Add(1)
go func() {
defer wg.Done()
@@ -56,10 +56,10 @@ func (c *httpComponent) start(ctx context.Context) {
}
}
-func (c *httpComponent) shutdown(ctx context.Context) error {
+func (c *httpComponent) shutdown() error {
if c.srv != nil {
- log.Info(ctx, "shutdown "+c.name)
- return c.srv.Shutdown(ctx)
+ log.Info(fmt.Sprintf("shutdown %s", c.name))
+ return c.srv.Shutdown()
}
return nil
}
@@ -71,17 +71,17 @@ func (c *httpComponent) needReload() bool {
return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur)
}
-func (c *httpComponent) reload(ctx context.Context) {
- log.Info(ctx, "reload "+c.name)
+func (c *httpComponent) reload() {
+ log.Info(fmt.Sprintf("reload %s", c.name))
if c.needReload() {
- log.Info(ctx, c.name+" config updated")
- if err := c.shutdown(ctx); err != nil {
- log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.Error(err),
+ log.Info(fmt.Sprintf("%s config updated", c.name))
+ if err := c.shutdown(); err != nil {
+ log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.String("error", err.Error()),
)
} else {
- c.init(ctx)
- c.start(ctx)
+ c.init()
+ c.start()
}
}
}
diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go
index 799feb784..70199b094 100644
--- a/cmd/frostfs-ir/main.go
+++ b/cmd/frostfs-ir/main.go
@@ -6,14 +6,11 @@ import (
"fmt"
"os"
"sync"
- "sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- irMetrics "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
"github.com/spf13/viper"
"go.uber.org/zap"
)
@@ -31,6 +28,7 @@ const (
var (
wg = new(sync.WaitGroup)
intErr = make(chan error) // internal inner ring errors
+ logPrm = new(logger.Prm)
innerRing *innerring.Server
pprofCmp *pprofComponent
metricsCmp *httpComponent
@@ -38,8 +36,6 @@ var (
cfg *viper.Viper
configFile *string
configDir *string
- cmode = &atomic.Bool{}
- audit = &atomic.Bool{}
)
func exitErr(err error) {
@@ -65,74 +61,55 @@ func main() {
cfg, err = newConfig()
exitErr(err)
- cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
-
- metrics := irMetrics.NewInnerRingMetrics()
-
- var logPrm logger.Prm
+ logPrm.MetricsNamespace = "frostfs_ir"
err = logPrm.SetLevelString(
cfg.GetString("logger.level"),
)
exitErr(err)
- err = logPrm.SetDestination(
- cfg.GetString("logger.destination"),
- )
- exitErr(err)
- logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
- logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
- err = logPrm.SetTags(loggerTags())
- exitErr(err)
log, err = logger.NewLogger(logPrm)
exitErr(err)
- logger.UpdateLevelForTags(logPrm)
-
ctx, cancel := context.WithCancel(context.Background())
pprofCmp = newPprofComponent()
- pprofCmp.init(ctx)
+ pprofCmp.init()
metricsCmp = newMetricsComponent()
- metricsCmp.init(ctx)
- audit.Store(cfg.GetBool("audit.enabled"))
+ metricsCmp.init()
- innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit)
+ innerRing, err = innerring.New(ctx, log, cfg, intErr)
exitErr(err)
- pprofCmp.start(ctx)
- metricsCmp.start(ctx)
+ pprofCmp.start()
+ metricsCmp.start()
// start inner ring
err = innerRing.Start(ctx, intErr)
exitErr(err)
- log.Info(ctx, logs.CommonApplicationStarted,
+ log.Info(logs.CommonApplicationStarted,
zap.String("version", misc.Version))
- watchForSignal(ctx, cancel)
+ watchForSignal(cancel)
<-ctx.Done() // graceful shutdown
- log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop)
+ log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
wg.Wait()
- log.Info(ctx, logs.FrostFSIRApplicationStopped)
+ log.Info(logs.FrostFSIRApplicationStopped)
}
-func shutdown(ctx context.Context) {
- innerRing.Stop(ctx)
- if err := metricsCmp.shutdown(ctx); err != nil {
- log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.Error(err),
+func shutdown() {
+ innerRing.Stop()
+ if err := metricsCmp.shutdown(); err != nil {
+ log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.String("error", err.Error()),
)
}
- if err := pprofCmp.shutdown(ctx); err != nil {
- log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.Error(err),
+ if err := pprofCmp.shutdown(); err != nil {
+ log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.String("error", err.Error()),
)
}
-
- if err := sdnotify.ClearStatus(); err != nil {
- log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
- }
}
diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go
index 2aebcde7f..d67c463fc 100644
--- a/cmd/frostfs-ir/pprof.go
+++ b/cmd/frostfs-ir/pprof.go
@@ -1,7 +1,7 @@
package main
import (
- "context"
+ "fmt"
"runtime"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -29,8 +29,8 @@ func newPprofComponent() *pprofComponent {
}
}
-func (c *pprofComponent) init(ctx context.Context) {
- c.httpComponent.init(ctx)
+func (c *pprofComponent) init() {
+ c.httpComponent.init()
if c.enabled {
c.blockRate = cfg.GetInt(pprofBlockRateKey)
@@ -52,17 +52,17 @@ func (c *pprofComponent) needReload() bool {
c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate)
}
-func (c *pprofComponent) reload(ctx context.Context) {
- log.Info(ctx, "reload "+c.name)
+func (c *pprofComponent) reload() {
+ log.Info(fmt.Sprintf("reload %s", c.name))
if c.needReload() {
- log.Info(ctx, c.name+" config updated")
- if err := c.shutdown(ctx); err != nil {
- log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
- zap.Error(err))
+ log.Info(fmt.Sprintf("%s config updated", c.name))
+ if err := c.shutdown(); err != nil {
+ log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+ zap.String("error", err.Error()))
return
}
- c.init(ctx)
- c.start(ctx)
+ c.init()
+ c.start()
}
}
diff --git a/cmd/frostfs-lens/internal/blobovnicza/inspect.go b/cmd/frostfs-lens/internal/blobovnicza/inspect.go
index e7e2c0769..b1a6e3fd2 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/inspect.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/inspect.go
@@ -28,7 +28,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
blz := openBlobovnicza(cmd)
- defer blz.Close(cmd.Context())
+ defer blz.Close()
var prm blobovnicza.GetPrm
prm.SetAddress(addr)
diff --git a/cmd/frostfs-lens/internal/blobovnicza/list.go b/cmd/frostfs-lens/internal/blobovnicza/list.go
index d41a15bcf..d327dbc41 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/list.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/list.go
@@ -32,7 +32,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
}
blz := openBlobovnicza(cmd)
- defer blz.Close(cmd.Context())
+ defer blz.Close()
err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr)
common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err))
diff --git a/cmd/frostfs-lens/internal/blobovnicza/root.go b/cmd/frostfs-lens/internal/blobovnicza/root.go
index 2819981d6..0a0cd955d 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/root.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/root.go
@@ -19,7 +19,7 @@ var Root = &cobra.Command{
}
func init() {
- Root.AddCommand(listCMD, inspectCMD, tuiCMD)
+ Root.AddCommand(listCMD, inspectCMD)
}
func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
@@ -27,7 +27,7 @@ func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
blobovnicza.WithPath(vPath),
blobovnicza.WithReadOnly(true),
)
- common.ExitOnErr(cmd, blz.Open(cmd.Context()))
+ common.ExitOnErr(cmd, blz.Open())
return blz
}
diff --git a/cmd/frostfs-lens/internal/blobovnicza/tui.go b/cmd/frostfs-lens/internal/blobovnicza/tui.go
deleted file mode 100644
index 4aa281616..000000000
--- a/cmd/frostfs-lens/internal/blobovnicza/tui.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package blobovnicza
-
-import (
- "context"
- "fmt"
-
- common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
- schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/blobovnicza"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
- "github.com/rivo/tview"
- "github.com/spf13/cobra"
-)
-
-var tuiCMD = &cobra.Command{
- Use: "explore",
- Short: "Blobovnicza exploration with a terminal UI",
- Long: `Launch a terminal UI to explore blobovnicza and search for data.
-
-Available search filters:
-- cid CID
-- oid OID
-- addr CID/OID
-`,
- Run: tuiFunc,
-}
-
-var initialPrompt string
-
-func init() {
- common.AddComponentPathFlag(tuiCMD, &vPath)
-
- tuiCMD.Flags().StringVar(
- &initialPrompt,
- "filter",
- "",
- "Filter prompt to start with, format 'tag:value [+ tag:value]...'",
- )
-}
-
-func tuiFunc(cmd *cobra.Command, _ []string) {
- common.ExitOnErr(cmd, runTUI(cmd))
-}
-
-func runTUI(cmd *cobra.Command) error {
- db, err := tui.OpenDB(vPath, false)
- if err != nil {
- return fmt.Errorf("couldn't open database: %w", err)
- }
- defer db.Close()
-
- ctx, cancel := context.WithCancel(cmd.Context())
- defer cancel()
-
- app := tview.NewApplication()
- ui := tui.NewUI(ctx, app, db, schema.BlobovniczaParser, nil)
-
- _ = ui.AddFilter("cid", tui.CIDParser, "CID")
- _ = ui.AddFilter("oid", tui.OIDParser, "OID")
- _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
-
- err = ui.WithPrompt(initialPrompt)
- if err != nil {
- return fmt.Errorf("invalid filter prompt: %w", err)
- }
-
- app.SetRoot(ui, true).SetFocus(ui)
- return app.Run()
-}
diff --git a/cmd/frostfs-lens/internal/flags.go b/cmd/frostfs-lens/internal/flags.go
index 8a987a2d4..004519167 100644
--- a/cmd/frostfs-lens/internal/flags.go
+++ b/cmd/frostfs-lens/internal/flags.go
@@ -8,6 +8,7 @@ const (
flagAddress = "address"
flagEnginePath = "path"
flagOutFile = "out"
+ flagDBType = "dbtype"
)
// AddAddressFlag adds the address flag to the passed cobra command.
@@ -33,3 +34,9 @@ func AddOutputFileFlag(cmd *cobra.Command, v *string) {
"File to save object payload")
_ = cmd.MarkFlagFilename(flagOutFile)
}
+
+// AddDBTypeFlag adds the DB type flag to the passed cobra command.
+func AddDBTypeFlag(cmd *cobra.Command, v *string) {
+ cmd.Flags().StringVar(v, flagDBType, "bbolt",
+ "Type of DB used by write cache (default: bbolt)")
+}
diff --git a/cmd/frostfs-lens/internal/meta/inspect.go b/cmd/frostfs-lens/internal/meta/inspect.go
index f436343c7..de0f24aeb 100644
--- a/cmd/frostfs-lens/internal/meta/inspect.go
+++ b/cmd/frostfs-lens/internal/meta/inspect.go
@@ -5,7 +5,7 @@ import (
"fmt"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -31,7 +31,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
db := openMeta(cmd)
- defer db.Close(cmd.Context())
+ defer db.Close()
storageID := meta.StorageIDPrm{}
storageID.SetAddress(addr)
@@ -40,7 +40,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("could not check if the obj is small: %w", err))
if id := resStorageID.StorageID(); id != nil {
- cmd.Printf("Object storageID: %s\n\n", blobovniczatree.NewIDFromBytes(id).Path())
+ cmd.Printf("Object storageID: %s\n\n", blobovnicza.NewIDFromBytes(id).String())
} else {
cmd.Printf("Object does not contain storageID\n\n")
}
diff --git a/cmd/frostfs-lens/internal/meta/list-garbage.go b/cmd/frostfs-lens/internal/meta/list-garbage.go
index 6b27a232f..61b10ca1f 100644
--- a/cmd/frostfs-lens/internal/meta/list-garbage.go
+++ b/cmd/frostfs-lens/internal/meta/list-garbage.go
@@ -19,7 +19,7 @@ func init() {
func listGarbageFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
- defer db.Close(cmd.Context())
+ defer db.Close()
var garbPrm meta.GarbageIterationPrm
garbPrm.SetHandler(
diff --git a/cmd/frostfs-lens/internal/meta/list-graveyard.go b/cmd/frostfs-lens/internal/meta/list-graveyard.go
index 45642e74b..19a93691c 100644
--- a/cmd/frostfs-lens/internal/meta/list-graveyard.go
+++ b/cmd/frostfs-lens/internal/meta/list-graveyard.go
@@ -19,7 +19,7 @@ func init() {
func listGraveyardFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
- defer db.Close(cmd.Context())
+ defer db.Close()
var gravePrm meta.GraveyardIterationPrm
gravePrm.SetHandler(
diff --git a/cmd/frostfs-lens/internal/meta/root.go b/cmd/frostfs-lens/internal/meta/root.go
index 351d1ce80..a59574b6c 100644
--- a/cmd/frostfs-lens/internal/meta/root.go
+++ b/cmd/frostfs-lens/internal/meta/root.go
@@ -5,7 +5,6 @@ import (
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/spf13/cobra"
"go.etcd.io/bbolt"
)
@@ -32,7 +31,6 @@ func init() {
inspectCMD,
listGraveyardCMD,
listGarbageCMD,
- tuiCMD,
)
}
@@ -45,7 +43,7 @@ func openMeta(cmd *cobra.Command) *meta.DB {
}),
meta.WithEpochState(epochState{}),
)
- common.ExitOnErr(cmd, common.Errf("could not open metabase: %w", db.Open(cmd.Context(), mode.ReadOnly)))
+ common.ExitOnErr(cmd, common.Errf("could not open metabase: %w", db.Open(cmd.Context(), true)))
return db
}
diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go
deleted file mode 100644
index 7b0e25f3d..000000000
--- a/cmd/frostfs-lens/internal/meta/tui.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package meta
-
-import (
- "context"
- "encoding/binary"
- "errors"
- "fmt"
-
- common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
- schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
- "github.com/rivo/tview"
- "github.com/spf13/cobra"
- "go.etcd.io/bbolt"
-)
-
-var tuiCMD = &cobra.Command{
- Use: "explore",
- Short: "Metabase exploration with a terminal UI",
- Long: `Launch a terminal UI to explore metabase and search for data.
-
-Available search filters:
-- cid CID
-- oid OID
-- addr CID/OID
-- attr key[/value]
-`,
- Run: tuiFunc,
-}
-
-var initialPrompt string
-
-var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
- 2: schema.MetabaseParserV2,
- 3: schema.MetabaseParserV3,
-}
-
-func init() {
- common.AddComponentPathFlag(tuiCMD, &vPath)
-
- tuiCMD.Flags().StringVar(
- &initialPrompt,
- "filter",
- "",
- "Filter prompt to start with, format 'tag:value [+ tag:value]...'",
- )
-}
-
-func tuiFunc(cmd *cobra.Command, _ []string) {
- common.ExitOnErr(cmd, runTUI(cmd))
-}
-
-func runTUI(cmd *cobra.Command) error {
- db, err := tui.OpenDB(vPath, false)
- if err != nil {
- return fmt.Errorf("couldn't open database: %w", err)
- }
- defer db.Close()
-
- schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
- if !hasVersion {
- return errors.New("couldn't detect schema version")
- }
-
- metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
- if !ok {
- return fmt.Errorf("unknown schema version %d", schemaVersion)
- }
-
- // Need if app was stopped with Ctrl-C.
- ctx, cancel := context.WithCancel(cmd.Context())
- defer cancel()
-
- app := tview.NewApplication()
- ui := tui.NewUI(ctx, app, db, metabaseParser, nil)
-
- _ = ui.AddFilter("cid", tui.CIDParser, "CID")
- _ = ui.AddFilter("oid", tui.OIDParser, "OID")
- _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
- _ = ui.AddCompositeFilter("attr", tui.AttributeParser, "key[/value]")
-
- err = ui.WithPrompt(initialPrompt)
- if err != nil {
- return fmt.Errorf("invalid filter prompt: %w", err)
- }
-
- app.SetRoot(ui, true).SetFocus(ui)
- return app.Run()
-}
-
-var (
- shardInfoBucket = []byte{5}
- versionRecord = []byte("version")
-)
-
-func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
- err := db.View(func(tx *bbolt.Tx) error {
- bkt := tx.Bucket(shardInfoBucket)
- if bkt == nil {
- return nil
- }
- rec := bkt.Get(versionRecord)
- if rec == nil {
- return nil
- }
-
- version = binary.LittleEndian.Uint64(rec)
- ok = true
-
- return nil
- })
- if err != nil {
- common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
- }
-
- return
-}
diff --git a/cmd/frostfs-lens/internal/printers.go b/cmd/frostfs-lens/internal/printers.go
index ea0cbc8e0..dd73a5552 100644
--- a/cmd/frostfs-lens/internal/printers.go
+++ b/cmd/frostfs-lens/internal/printers.go
@@ -59,7 +59,7 @@ func WriteObjectToFile(cmd *cobra.Command, path string, data []byte) {
}
ExitOnErr(cmd, Errf("could not write file: %w",
- os.WriteFile(path, data, 0o644)))
+ os.WriteFile(path, data, 0644)))
cmd.Printf("\nSaved payload to '%s' file\n", path)
}
diff --git a/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go b/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go
deleted file mode 100644
index 02b6cf414..000000000
--- a/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package blobovnicza
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/mr-tron/base58"
-)
-
-var BlobovniczaParser = common.WithFallback(
- common.Any(
- MetaBucketParser,
- BucketParser,
- ),
- common.RawParser.ToFallbackParser(),
-)
-
-func MetaBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if value != nil {
- return nil, nil, errors.New("not a bucket")
- }
-
- if string(key) != "META" {
- return nil, nil, errors.New("invalid bucket name")
- }
-
- return &MetaBucket{}, MetaRecordParser, nil
-}
-
-func MetaRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- var r MetaRecord
-
- if len(key) == 0 {
- return nil, nil, errors.New("invalid key")
- }
-
- r.label = string(key)
- r.count = binary.LittleEndian.Uint64(value)
-
- return &r, nil, nil
-}
-
-func BucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if value != nil {
- return nil, nil, errors.New("not a bucket")
- }
-
- size, n := binary.Varint(key)
- if n <= 0 {
- return nil, nil, errors.New("invalid size")
- }
-
- return &Bucket{size: size}, RecordParser, nil
-}
-
-func RecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- parts := strings.Split(string(key), "/")
-
- if len(parts) != 2 {
- return nil, nil, errors.New("invalid key, expected address string /")
- }
-
- cnrRaw, err := base58.Decode(parts[0])
- if err != nil {
- return nil, nil, errors.New("can't decode CID string")
- }
- objRaw, err := base58.Decode(parts[1])
- if err != nil {
- return nil, nil, errors.New("can't decode OID string")
- }
-
- cnr := cid.ID{}
- if err := cnr.Decode(cnrRaw); err != nil {
- return nil, nil, fmt.Errorf("can't decode CID: %w", err)
- }
- obj := oid.ID{}
- if err := obj.Decode(objRaw); err != nil {
- return nil, nil, fmt.Errorf("can't decode OID: %w", err)
- }
-
- var r Record
-
- r.addr.SetContainer(cnr)
- r.addr.SetObject(obj)
-
- if err := r.object.Unmarshal(value); err != nil {
- return nil, nil, errors.New("can't unmarshal object")
- }
-
- return &r, nil, nil
-}
diff --git a/cmd/frostfs-lens/internal/schema/blobovnicza/types.go b/cmd/frostfs-lens/internal/schema/blobovnicza/types.go
deleted file mode 100644
index c7ed08cdd..000000000
--- a/cmd/frostfs-lens/internal/schema/blobovnicza/types.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package blobovnicza
-
-import (
- "fmt"
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/davecgh/go-spew/spew"
- "github.com/gdamore/tcell/v2"
- "github.com/rivo/tview"
-)
-
-type (
- MetaBucket struct{}
-
- MetaRecord struct {
- label string
- count uint64
- }
-
- Bucket struct {
- size int64
- }
-
- Record struct {
- addr oid.Address
- object objectSDK.Object
- }
-)
-
-func (b *MetaBucket) String() string {
- return common.FormatSimple("META", tcell.ColorLime)
-}
-
-func (b *MetaBucket) DetailedString() string {
- return spew.Sdump(*b)
-}
-
-func (b *MetaBucket) Filter(string, any) common.FilterResult {
- return common.No
-}
-
-func (r *MetaRecord) String() string {
- return fmt.Sprintf("%-11s %c %d", r.label, tview.Borders.Vertical, r.count)
-}
-
-func (r *MetaRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *MetaRecord) Filter(string, any) common.FilterResult {
- return common.No
-}
-
-func (b *Bucket) String() string {
- return common.FormatSimple(strconv.FormatInt(b.size, 10), tcell.ColorLime)
-}
-
-func (b *Bucket) DetailedString() string {
- return spew.Sdump(*b)
-}
-
-func (b *Bucket) Filter(typ string, _ any) common.FilterResult {
- switch typ {
- case "cid":
- return common.Maybe
- case "oid":
- return common.Maybe
- default:
- return common.No
- }
-}
-
-func (r *Record) String() string {
- return fmt.Sprintf(
- "CID %s OID %s %c Object {...}",
- common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
- common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
- tview.Borders.Vertical,
- )
-}
-
-func (r *Record) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *Record) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
diff --git a/cmd/frostfs-lens/internal/schema/common/format.go b/cmd/frostfs-lens/internal/schema/common/format.go
deleted file mode 100644
index 4ed7e96f2..000000000
--- a/cmd/frostfs-lens/internal/schema/common/format.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package common
-
-import (
- "fmt"
- "strconv"
-
- "github.com/gdamore/tcell/v2"
-)
-
-type FormatOptions struct {
- Color tcell.Color
-
- Bold,
- Italic,
- Underline,
- StrikeThrough bool
-}
-
-func Format(s string, opts FormatOptions) string {
- var boldTag, italicTag, underlineTag, strikeThroughTag string
-
- switch {
- case opts.Bold:
- boldTag = "b"
- case opts.Italic:
- italicTag = "i"
- case opts.Underline:
- underlineTag = "u"
- case opts.StrikeThrough:
- strikeThroughTag = "s"
- }
-
- attrs := fmt.Sprintf(
- "%s%s%s%s", boldTag, italicTag, underlineTag, strikeThroughTag,
- )
- color := strconv.FormatInt(int64(opts.Color.Hex()), 16)
-
- return fmt.Sprintf("[#%06s::%s]%s[-::-]", color, attrs, s)
-}
-
-func FormatSimple(s string, c tcell.Color) string {
- return Format(s, FormatOptions{Color: c})
-}
diff --git a/cmd/frostfs-lens/internal/schema/common/raw.go b/cmd/frostfs-lens/internal/schema/common/raw.go
deleted file mode 100644
index 55051554c..000000000
--- a/cmd/frostfs-lens/internal/schema/common/raw.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package common
-
-import (
- "github.com/davecgh/go-spew/spew"
- "github.com/gdamore/tcell/v2"
- "github.com/mr-tron/base58"
-)
-
-type RawEntry struct {
- // key and value used for record dump.
- // nolint:unused
- key, value []byte
-}
-
-var RawParser Parser = rawParser
-
-func rawParser(key, value []byte) (SchemaEntry, Parser, error) {
- return &RawEntry{key: key, value: value}, rawParser, nil
-}
-
-func (r *RawEntry) String() string {
- return FormatSimple(base58.Encode(r.key), tcell.ColorRed)
-}
-
-func (r *RawEntry) DetailedString() string {
- return spew.Sdump(r)
-}
-
-func (r *RawEntry) Filter(string, any) FilterResult {
- return No
-}
diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go
deleted file mode 100644
index 077a68785..000000000
--- a/cmd/frostfs-lens/internal/schema/common/schema.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package common
-
-import (
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
-)
-
-type FilterResult byte
-
-const (
- No FilterResult = iota
- Maybe
- Yes
-)
-
-func IfThenElse(condition bool, onSuccess, onFailure FilterResult) FilterResult {
- var res FilterResult
- if condition {
- res = onSuccess
- } else {
- res = onFailure
- }
- return res
-}
-
-type SchemaEntry interface {
- String() string
- DetailedString() string
- Filter(typ string, val any) FilterResult
-}
-
-type (
- Parser func(key, value []byte) (SchemaEntry, Parser, error)
- FallbackParser func(key, value []byte) (SchemaEntry, Parser)
-)
-
-func Any(parsers ...Parser) Parser {
- return func(key, value []byte) (SchemaEntry, Parser, error) {
- var errs error
- for _, parser := range parsers {
- ret, next, err := parser(key, value)
- if err == nil {
- return ret, next, nil
- }
- errs = errors.Join(errs, err)
- }
- return nil, nil, fmt.Errorf("no parser succeeded: %w", errs)
- }
-}
-
-func WithFallback(parser Parser, fallback FallbackParser) Parser {
- if parser == nil {
- return fallback.ToParser()
- }
- return func(key, value []byte) (SchemaEntry, Parser, error) {
- entry, next, err := parser(key, value)
- if err == nil {
- return entry, WithFallback(next, fallback), nil
- }
- return fallback.ToParser()(key, value)
- }
-}
-
-func (fp FallbackParser) ToParser() Parser {
- return func(key, value []byte) (SchemaEntry, Parser, error) {
- entry, next := fp(key, value)
- return entry, next, nil
- }
-}
-
-func (p Parser) ToFallbackParser() FallbackParser {
- return func(key, value []byte) (SchemaEntry, Parser) {
- entry, next, err := p(key, value)
- assert.NoError(err, "couldn't use that parser as a fallback parser")
- return entry, next
- }
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go
deleted file mode 100644
index 6a08a723e..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package buckets
-
-import (
- "github.com/davecgh/go-spew/spew"
-)
-
-func (b *PrefixBucket) DetailedString() string {
- return spew.Sdump(*b)
-}
-
-func (b *PrefixContainerBucket) DetailedString() string {
- return spew.Sdump(*b)
-}
-
-func (b *UserBucket) DetailedString() string {
- return spew.Sdump(*b)
-}
-
-func (b *ContainerBucket) DetailedString() string {
- return spew.Sdump(*b)
-}
-
-func (b *UserAttributeKeyBucket) DetailedString() string {
- return spew.Sdump(*b)
-}
-
-func (b *UserAttributeValueBucket) DetailedString() string {
- return spew.Sdump(*b)
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go
deleted file mode 100644
index 891c4004f..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package buckets
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-)
-
-func (b *PrefixBucket) Filter(typ string, _ any) common.FilterResult {
- switch typ {
- case "cid":
- return b.resolvers.cidResolver(false)
- case "oid":
- return b.resolvers.oidResolver(false)
- default:
- return common.No
- }
-}
-
-func (b *PrefixContainerBucket) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return b.resolvers.cidResolver(b.id.Equals(id))
- case "oid":
- return b.resolvers.oidResolver(false)
- default:
- return common.No
- }
-}
-
-func (b *UserBucket) Filter(typ string, _ any) common.FilterResult {
- switch typ {
- case "cid":
- return b.resolvers.cidResolver(false)
- case "oid":
- return b.resolvers.oidResolver(false)
- default:
- return common.No
- }
-}
-
-func (b *ContainerBucket) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return b.resolvers.cidResolver(b.id.Equals(id))
- case "oid":
- return b.resolvers.oidResolver(false)
- default:
- return common.No
- }
-}
-
-func (b *UserAttributeKeyBucket) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return common.IfThenElse(b.id.Equals(id), common.Yes, common.No)
- case "oid":
- return common.Maybe
- case "key":
- key := val.(string)
- return common.IfThenElse(b.key == key, common.Yes, common.No)
- case "value":
- return common.Maybe
- default:
- return common.No
- }
-}
-
-func (b *UserAttributeValueBucket) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- return common.Maybe
- case "value":
- value := val.(string)
- return common.IfThenElse(b.value == value, common.Yes, common.No)
- default:
- return common.No
- }
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
deleted file mode 100644
index 4e6bbf08a..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package buckets
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/records"
-)
-
-var (
- GraveyardParser = NewPrefixBucketParser(Graveyard, records.GraveyardRecordParser, Resolvers{
- cidResolver: LenientResolver,
- oidResolver: LenientResolver,
- })
-
- GarbageParser = NewPrefixBucketParser(Garbage, records.GarbageRecordParser, Resolvers{
- cidResolver: LenientResolver,
- oidResolver: LenientResolver,
- })
-
- ContainerVolumeParser = NewPrefixBucketParser(ContainerVolume, records.ContainerVolumeRecordParser, Resolvers{
- cidResolver: LenientResolver,
- oidResolver: StrictResolver,
- })
-
- LockedParser = NewPrefixBucketParser(
- Locked,
- NewContainerBucketParser(
- records.LockedRecordParser,
- Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- },
- ),
- Resolvers{
- cidResolver: LenientResolver,
- oidResolver: LenientResolver,
- },
- )
-
- ShardInfoParser = NewPrefixBucketParser(ShardInfo, records.ShardInfoRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: StrictResolver,
- })
-
- PrimaryParser = NewPrefixContainerBucketParser(Primary, records.ObjectRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- })
-
- LockersParser = NewPrefixContainerBucketParser(Lockers, records.ObjectRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- })
-
- TombstoneParser = NewPrefixContainerBucketParser(Tombstone, records.ObjectRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- })
-
- SmallParser = NewPrefixContainerBucketParser(Small, records.SmallRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- })
-
- RootParser = NewPrefixContainerBucketParser(Root, records.RootRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- })
-
- OwnerParser = NewPrefixContainerBucketParser(
- Owner,
- NewUserBucketParser(
- records.OwnerRecordParser,
- Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- },
- ),
- Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- },
- )
-
- UserAttributeParserV2 = NewUserAttributeKeyBucketParser(
- NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
- )
-
- UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
- NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
- []string{"FilePath", "S3-Access-Box-CRDT-Name"},
- )
-
- PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: StrictResolver,
- })
-
- ParentParser = NewPrefixContainerBucketParser(Parent, records.ParentRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- })
-
- SplitParser = NewPrefixContainerBucketParser(Split, records.SplitRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: StrictResolver,
- })
-
- ContainerCountersParser = NewPrefixBucketParser(ContainerCounters, records.ContainerCountersRecordParser, Resolvers{
- cidResolver: LenientResolver,
- oidResolver: StrictResolver,
- })
-
- ECInfoParser = NewPrefixContainerBucketParser(ECInfo, records.ECInfoRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- })
-
- ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
- cidResolver: LenientResolver,
- oidResolver: LenientResolver,
- })
-
- ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
- cidResolver: StrictResolver,
- oidResolver: LenientResolver,
- })
-)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
deleted file mode 100644
index 42a24c594..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package buckets
-
-type Prefix byte
-
-const (
- Graveyard Prefix = iota
- Garbage
- ToMoveIt
- ContainerVolume
- Locked
- ShardInfo
- Primary
- Lockers
- _
- Tombstone
- Small
- Root
- Owner
- UserAttribute
- PayloadHash
- Parent
- Split
- ContainerCounters
- ECInfo
- ExpirationEpochToObject
- ObjectToExpirationEpoch
-)
-
-var x = map[Prefix]string{
- Graveyard: "Graveyard",
- Garbage: "Garbage",
- ToMoveIt: "To Move It",
- ContainerVolume: "Container Volume",
- Locked: "Locked",
- ShardInfo: "Shard Info",
- Primary: "Primary",
- Lockers: "Lockers",
- Tombstone: "Tombstone",
- Small: "Small",
- Root: "Root",
- Owner: "Owner",
- UserAttribute: "User Attribute",
- PayloadHash: "Payload Hash",
- Parent: "Parent",
- Split: "Split",
- ContainerCounters: "Container Counters",
- ECInfo: "EC Info",
- ExpirationEpochToObject: "Exp. Epoch to Object",
- ObjectToExpirationEpoch: "Object to Exp. Epoch",
-}
-
-func (p Prefix) String() string {
- if s, ok := x[p]; ok {
- return s
- }
- return "Unknown Prefix"
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
deleted file mode 100644
index 62d126f88..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package buckets
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- "github.com/gdamore/tcell/v2"
-)
-
-func (b *PrefixBucket) String() string {
- return common.FormatSimple(
- fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
- )
-}
-
-func (b *PrefixContainerBucket) String() string {
- return fmt.Sprintf(
- "%s CID %s",
- common.FormatSimple(
- fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
- ),
- common.FormatSimple(b.id.String(), tcell.ColorAqua),
- )
-}
-
-func (b *UserBucket) String() string {
- return "UID " + common.FormatSimple(b.id.String(), tcell.ColorAqua)
-}
-
-func (b *ContainerBucket) String() string {
- return "CID " + common.FormatSimple(b.id.String(), tcell.ColorAqua)
-}
-
-func (b *UserAttributeKeyBucket) String() string {
- return fmt.Sprintf("%s CID %s ATTR-KEY %s",
- common.FormatSimple(
- fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
- ),
- common.FormatSimple(
- fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,
- ),
- common.FormatSimple(b.key, tcell.ColorAqua),
- )
-}
-
-func (b *UserAttributeValueBucket) String() string {
- return "ATTR-VALUE " + common.FormatSimple(b.value, tcell.ColorAqua)
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
deleted file mode 100644
index 7355c3d9e..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package buckets
-
-import (
- "errors"
- "slices"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/mr-tron/base58"
-)
-
-type (
- PrefixBucket struct {
- prefix Prefix
- resolvers Resolvers
- }
-
- PrefixContainerBucket struct {
- prefix Prefix
- id cid.ID
- resolvers Resolvers
- }
-
- ContainerBucket struct {
- id cid.ID
- resolvers Resolvers
- }
-
- UserBucket struct {
- id user.ID
- resolvers Resolvers
- }
-
- UserAttributeKeyBucket struct {
- prefix Prefix
- id cid.ID
- key string
- }
-
- UserAttributeValueBucket struct {
- value string
- }
-)
-
-type (
- FilterResolver = func(result bool) common.FilterResult
-
- Resolvers struct {
- cidResolver FilterResolver
- oidResolver FilterResolver
- }
-)
-
-var (
- StrictResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.No) }
- LenientResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.Maybe) }
-)
-
-var (
- ErrNotBucket = errors.New("not a bucket")
- ErrInvalidKeyLength = errors.New("invalid key length")
- ErrInvalidValueLength = errors.New("invalid value length")
- ErrInvalidPrefix = errors.New("invalid prefix")
- ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
-)
-
-func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
- return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if value != nil {
- return nil, nil, ErrNotBucket
- }
- if len(key) != 1 {
- return nil, nil, ErrInvalidKeyLength
- }
- var b PrefixBucket
- if b.prefix = Prefix(key[0]); b.prefix != prefix {
- return nil, nil, ErrInvalidPrefix
- }
- b.resolvers = resolvers
- return &b, next, nil
- }
-}
-
-func NewPrefixContainerBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
- return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if value != nil {
- return nil, nil, ErrNotBucket
- }
- if len(key) != 33 {
- return nil, nil, ErrInvalidKeyLength
- }
- var b PrefixContainerBucket
- if b.prefix = Prefix(key[0]); b.prefix != prefix {
- return nil, nil, ErrInvalidPrefix
- }
- if err := b.id.Decode(key[1:]); err != nil {
- return nil, nil, err
- }
- b.resolvers = resolvers
- return &b, next, nil
- }
-}
-
-func NewUserBucketParser(next common.Parser, resolvers Resolvers) common.Parser {
- return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if value != nil {
- return nil, nil, ErrNotBucket
- }
- var b UserBucket
- if err := b.id.DecodeString(base58.Encode(key)); err != nil {
- return nil, nil, err
- }
- b.resolvers = resolvers
- return &b, next, nil
- }
-}
-
-func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Parser {
- return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if value != nil {
- return nil, nil, ErrNotBucket
- }
- if len(key) != 32 {
- return nil, nil, ErrInvalidKeyLength
- }
- var b ContainerBucket
- if err := b.id.Decode(key); err != nil {
- return nil, nil, err
- }
- b.resolvers = resolvers
- return &b, next, nil
- }
-}
-
-func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
- return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
-}
-
-func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
- return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if value != nil {
- return nil, nil, ErrNotBucket
- }
- if len(key) < 34 {
- return nil, nil, ErrInvalidKeyLength
- }
- var b UserAttributeKeyBucket
- if b.prefix = Prefix(key[0]); b.prefix != UserAttribute {
- return nil, nil, ErrInvalidPrefix
- }
- if err := b.id.Decode(key[1:33]); err != nil {
- return nil, nil, err
- }
- b.key = string(key[33:])
-
- if len(keys) != 0 && !slices.Contains(keys, b.key) {
- return nil, nil, ErrUnexpectedAttributeKey
- }
-
- return &b, next, nil
- }
-}
-
-func NewUserAttributeValueBucketParser(next common.Parser) common.Parser {
- return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if value != nil {
- return nil, nil, ErrNotBucket
- }
- if len(key) == 0 {
- return nil, nil, ErrInvalidKeyLength
- }
- var b UserAttributeValueBucket
- b.value = string(key)
- return &b, next, nil
- }
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go
deleted file mode 100644
index 4cc9e8765..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/parser.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package metabase
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
-)
-
-var MetabaseParserV3 = common.WithFallback(
- common.Any(
- buckets.GraveyardParser,
- buckets.GarbageParser,
- buckets.ContainerVolumeParser,
- buckets.LockedParser,
- buckets.ShardInfoParser,
- buckets.PrimaryParser,
- buckets.LockersParser,
- buckets.TombstoneParser,
- buckets.SmallParser,
- buckets.RootParser,
- buckets.UserAttributeParserV3,
- buckets.ParentParser,
- buckets.SplitParser,
- buckets.ContainerCountersParser,
- buckets.ECInfoParser,
- buckets.ExpirationEpochToObjectParser,
- buckets.ObjectToExpirationEpochParser,
- ),
- common.RawParser.ToFallbackParser(),
-)
-
-var MetabaseParserV2 = common.WithFallback(
- common.Any(
- buckets.GraveyardParser,
- buckets.GarbageParser,
- buckets.ContainerVolumeParser,
- buckets.LockedParser,
- buckets.ShardInfoParser,
- buckets.PrimaryParser,
- buckets.LockersParser,
- buckets.TombstoneParser,
- buckets.SmallParser,
- buckets.RootParser,
- buckets.OwnerParser,
- buckets.UserAttributeParserV2,
- buckets.PayloadHashParser,
- buckets.ParentParser,
- buckets.SplitParser,
- buckets.ContainerCountersParser,
- buckets.ECInfoParser,
- ),
- common.RawParser.ToFallbackParser(),
-)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
deleted file mode 100644
index 477c4fc9d..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package records
-
-import (
- "github.com/davecgh/go-spew/spew"
-)
-
-func (r *GraveyardRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *GarbageRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *ContainerVolumeRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *LockedRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *ShardInfoRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *ObjectRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *SmallRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *RootRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *OwnerRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *UserAttributeRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *PayloadHashRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *ParentRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *SplitRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *ContainerCountersRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *ECInfoRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *ExpirationEpochToObjectRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (r *ObjectToExpirationEpochRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
deleted file mode 100644
index e038911d7..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package records
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-func (r *GraveyardRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return common.IfThenElse(r.object.Container().Equals(id), common.Yes, common.No)
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.object.Object().Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *GarbageRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *ContainerVolumeRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *ShardInfoRecord) Filter(string, any) common.FilterResult {
- return common.No
-}
-
-func (r *LockedRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *ObjectRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *SmallRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *RootRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *OwnerRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *UserAttributeRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *PayloadHashRecord) Filter(string, any) common.FilterResult {
- return common.No
-}
-
-func (r *ParentRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.parent.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *SplitRecord) Filter(string, any) common.FilterResult {
- return common.No
-}
-
-func (r *ContainerCountersRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
-
-func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
deleted file mode 100644
index 5d846cb75..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
+++ /dev/null
@@ -1,293 +0,0 @@
-package records
-
-import (
- "encoding/binary"
- "errors"
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-var (
- ErrInvalidKeyLength = errors.New("invalid key length")
- ErrInvalidValueLength = errors.New("invalid value length")
- ErrInvalidPrefix = errors.New("invalid prefix")
-)
-
-func GraveyardRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if len(key) != 64 {
- return nil, nil, ErrInvalidKeyLength
- }
- if len(value) != 64 {
- return nil, nil, ErrInvalidValueLength
- }
- var (
- cnr cid.ID
- obj oid.ID
- r GraveyardRecord
- )
-
- _ = cnr.Decode(key[:32])
- _ = obj.Decode(key[32:])
-
- r.object.SetContainer(cnr)
- r.object.SetObject(obj)
-
- _ = cnr.Decode(value[:32])
- _ = obj.Decode(value[32:])
-
- r.tombstone.SetContainer(cnr)
- r.tombstone.SetObject(obj)
-
- return &r, nil, nil
-}
-
-func GarbageRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
- if len(key) != 64 {
- return nil, nil, ErrInvalidKeyLength
- }
- var (
- cnr cid.ID
- obj oid.ID
- r GarbageRecord
- )
-
- _ = cnr.Decode(key[:32])
- _ = obj.Decode(key[32:])
-
- r.addr.SetContainer(cnr)
- r.addr.SetObject(obj)
-
- return &r, nil, nil
-}
-
-func ContainerVolumeRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if len(key) != 32 {
- return nil, nil, ErrInvalidKeyLength
- }
- if len(value) != 8 {
- return nil, nil, ErrInvalidValueLength
- }
- var r ContainerVolumeRecord
-
- _ = r.id.Decode(key)
- r.volume = binary.LittleEndian.Uint64(value)
-
- return &r, nil, nil
-}
-
-func LockedRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- var (
- r LockedRecord
- err error
- )
-
- if err := r.id.Decode(key); err != nil {
- return nil, nil, err
- }
- if r.ids, err = DecodeOIDs(value); err != nil {
- return nil, nil, err
- }
- return &r, nil, nil
-}
-
-func ShardInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if len(key) == 0 {
- return nil, nil, ErrInvalidKeyLength
- }
-
- var r ShardInfoRecord
- if string(key) == "id" {
- r.label = string(key)
- r.value = shard.ID(value).String()
-
- return &r, nil, nil
- }
-
- if len(value) != 8 {
- return nil, nil, ErrInvalidValueLength
- }
- r.label = string(key)
- r.value = strconv.FormatUint(binary.LittleEndian.Uint64(value), 10)
-
- return &r, nil, nil
-}
-
-func ObjectRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if len(key) != 32 {
- return nil, nil, ErrInvalidKeyLength
- }
- var r ObjectRecord
-
- _ = r.id.Decode(key)
- if err := r.object.Unmarshal(value); err != nil {
- return nil, nil, err
- }
-
- return &r, nil, nil
-}
-
-func SmallRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- var r SmallRecord
- if err := r.id.Decode(key); err != nil {
- return nil, nil, err
- }
- if len(value) != 0 {
- x := string(value)
- r.storageID = &x
- }
- return &r, nil, nil
-}
-
-func RootRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- var r RootRecord
- if err := r.id.Decode(key); err != nil {
- return nil, nil, err
- }
- if len(value) == 0 {
- return &r, nil, nil
- }
- r.info = &objectSDK.SplitInfo{}
- if err := r.info.Unmarshal(value); err != nil {
- return nil, nil, err
- }
- return &r, nil, nil
-}
-
-func OwnerRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
- var r OwnerRecord
- if err := r.id.Decode(key); err != nil {
- return nil, nil, err
- }
- return &r, nil, nil
-}
-
-func UserAttributeRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
- var r UserAttributeRecord
- if err := r.id.Decode(key); err != nil {
- return nil, nil, err
- }
- return &r, nil, nil
-}
-
-func PayloadHashRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if len(key) != 32 {
- return nil, nil, ErrInvalidKeyLength
- }
- var (
- err error
- r PayloadHashRecord
- )
-
- r.checksum.SetSHA256([32]byte(key))
- if r.ids, err = DecodeOIDs(value); err != nil {
- return nil, nil, err
- }
- return &r, nil, nil
-}
-
-func ParentRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- var (
- r ParentRecord
- err error
- )
- if err = r.parent.Decode(key); err != nil {
- return nil, nil, err
- }
- if r.ids, err = DecodeOIDs(value); err != nil {
- return nil, nil, err
- }
- return &r, nil, nil
-}
-
-func SplitRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- var (
- err error
- r SplitRecord
- )
- if err = r.id.UnmarshalBinary(key); err != nil {
- return nil, nil, err
- }
- if r.ids, err = DecodeOIDs(value); err != nil {
- return nil, nil, err
- }
- return &r, nil, nil
-}
-
-func ContainerCountersRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if len(value) != 24 {
- return nil, nil, ErrInvalidValueLength
- }
-
- var r ContainerCountersRecord
- if err := r.id.Decode(key); err != nil {
- return nil, nil, err
- }
-
- r.logical = binary.LittleEndian.Uint64(value[:8])
- r.physical = binary.LittleEndian.Uint64(value[8:16])
- r.user = binary.LittleEndian.Uint64(value[16:24])
-
- return &r, nil, nil
-}
-
-func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- var (
- r ECInfoRecord
- err error
- )
-
- if err := r.id.Decode(key); err != nil {
- return nil, nil, err
- }
- if r.ids, err = DecodeOIDs(value); err != nil {
- return nil, nil, err
- }
- return &r, nil, nil
-}
-
-func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
- if len(key) != 72 {
- return nil, nil, ErrInvalidKeyLength
- }
-
- var (
- r ExpirationEpochToObjectRecord
- err error
- )
-
- r.epoch = binary.BigEndian.Uint64(key[:8])
- if err = r.cnt.Decode(key[8:40]); err != nil {
- return nil, nil, err
- }
- if err = r.obj.Decode(key[40:]); err != nil {
- return nil, nil, err
- }
-
- return &r, nil, nil
-}
-
-func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if len(key) != 32 {
- return nil, nil, ErrInvalidKeyLength
- }
- if len(value) != 8 {
- return nil, nil, ErrInvalidValueLength
- }
-
- var (
- r ObjectToExpirationEpochRecord
- err error
- )
-
- if err = r.obj.Decode(key); err != nil {
- return nil, nil, err
- }
- r.epoch = binary.LittleEndian.Uint64(value)
-
- return &r, nil, nil
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
deleted file mode 100644
index f71244625..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/records/string.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package records
-
-import (
- "fmt"
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- "github.com/gdamore/tcell/v2"
- "github.com/rivo/tview"
-)
-
-func (r *GraveyardRecord) String() string {
- return fmt.Sprintf(
- "Object CID %s OID %s %c Tombstone CID %s OID %s",
- common.FormatSimple(fmt.Sprintf("%-44s", r.object.Container()), tcell.ColorAqua),
- common.FormatSimple(fmt.Sprintf("%-44s", r.object.Object()), tcell.ColorAqua),
- tview.Borders.Vertical,
- common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Container()), tcell.ColorAqua),
- common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Object()), tcell.ColorAqua),
- )
-}
-
-func (r *GarbageRecord) String() string {
- return fmt.Sprintf(
- "CID %-44s OID %-44s",
- common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
- common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
- )
-}
-
-func (r *ContainerVolumeRecord) String() string {
- return fmt.Sprintf(
- "CID %-44s %c %d",
- common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
- tview.Borders.Vertical,
- r.volume,
- )
-}
-
-func (r *LockedRecord) String() string {
- return fmt.Sprintf(
- "Object OID %s %c Lockers [%d]OID {...}",
- common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
- tview.Borders.Vertical,
- len(r.ids),
- )
-}
-
-func (r *ShardInfoRecord) String() string {
- return fmt.Sprintf("%-13s %c %s", r.label, tview.Borders.Vertical, r.value)
-}
-
-func (r *ObjectRecord) String() string {
- return fmt.Sprintf(
- "OID %s %c Object {...}",
- common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
- tview.Borders.Vertical,
- )
-}
-
-func (r *SmallRecord) String() string {
- s := fmt.Sprintf(
- "OID %s %c",
- common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
- tview.Borders.Vertical,
- )
- if r.storageID != nil {
- s = fmt.Sprintf("%s %s", s, *r.storageID)
- }
- return s
-}
-
-func (r *RootRecord) String() string {
- s := fmt.Sprintf(
- "Root OID %s %c",
- common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
- tview.Borders.Vertical,
- )
- if r.info != nil {
- s += " Split info {...}"
- }
- return s
-}
-
-func (r *OwnerRecord) String() string {
- return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua)
-}
-
-func (r *UserAttributeRecord) String() string {
- return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua)
-}
-
-func (r *PayloadHashRecord) String() string {
- return fmt.Sprintf(
- "Checksum %s %c [%d]OID {...}",
- common.FormatSimple(r.checksum.String(), tcell.ColorAqua),
- tview.Borders.Vertical,
- len(r.ids),
- )
-}
-
-func (r *ParentRecord) String() string {
- return fmt.Sprintf(
- "Parent OID %s %c [%d]OID {...}",
- common.FormatSimple(fmt.Sprintf("%-44s", r.parent), tcell.ColorAqua),
- tview.Borders.Vertical,
- len(r.ids),
- )
-}
-
-func (r *SplitRecord) String() string {
- return fmt.Sprintf(
- "Split ID %s %c [%d]OID {...}",
- common.FormatSimple(r.id.String(), tcell.ColorAqua),
- tview.Borders.Vertical,
- len(r.ids),
- )
-}
-
-func (r *ContainerCountersRecord) String() string {
- return fmt.Sprintf(
- "CID %s %c logical %d, physical %d, user %d",
- common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
- tview.Borders.Vertical,
- r.logical, r.physical, r.user,
- )
-}
-
-func (r *ECInfoRecord) String() string {
- return fmt.Sprintf(
- "OID %s %c [%d]OID {...}",
- common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
- tview.Borders.Vertical,
- len(r.ids),
- )
-}
-
-func (r *ExpirationEpochToObjectRecord) String() string {
- return fmt.Sprintf(
- "exp. epoch %s %c CID %s OID %s",
- common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
- tview.Borders.Vertical,
- common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
- common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
- )
-}
-
-func (r *ObjectToExpirationEpochRecord) String() string {
- return fmt.Sprintf(
- "OID %s %c exp. epoch %s",
- common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
- tview.Borders.Vertical,
- common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
- )
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go
deleted file mode 100644
index 0809cad1a..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/records/types.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package records
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/google/uuid"
-)
-
-type (
- GraveyardRecord struct {
- object, tombstone oid.Address
- }
-
- GarbageRecord struct {
- addr oid.Address
- }
-
- ContainerVolumeRecord struct {
- id cid.ID
- volume uint64
- }
-
- LockedRecord struct {
- id oid.ID
- ids []oid.ID
- }
-
- ShardInfoRecord struct {
- label string
- value string
- }
-
- ObjectRecord struct {
- id oid.ID
- object objectSDK.Object
- }
-
- SmallRecord struct {
- id oid.ID
- storageID *string // optional
- }
-
- RootRecord struct {
- id oid.ID
- info *objectSDK.SplitInfo // optional
- }
-
- OwnerRecord struct {
- id oid.ID
- }
-
- UserAttributeRecord struct {
- id oid.ID
- }
-
- PayloadHashRecord struct {
- checksum checksum.Checksum
- ids []oid.ID
- }
-
- ParentRecord struct {
- parent oid.ID
- ids []oid.ID
- }
-
- SplitRecord struct {
- id uuid.UUID
- ids []oid.ID
- }
-
- ContainerCountersRecord struct {
- id cid.ID
- logical, physical, user uint64
- }
-
- ECInfoRecord struct {
- id oid.ID
- ids []oid.ID
- }
-
- ExpirationEpochToObjectRecord struct {
- epoch uint64
- cnt cid.ID
- obj oid.ID
- }
-
- ObjectToExpirationEpochRecord struct {
- obj oid.ID
- epoch uint64
- }
-)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/util.go b/cmd/frostfs-lens/internal/schema/metabase/records/util.go
deleted file mode 100644
index d15d69146..000000000
--- a/cmd/frostfs-lens/internal/schema/metabase/records/util.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package records
-
-import (
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nspcc-dev/neo-go/pkg/io"
-)
-
-func DecodeOIDs(data []byte) ([]oid.ID, error) {
- r := io.NewBinReaderFromBuf(data)
-
- size := r.ReadVarUint()
- oids := make([]oid.ID, size)
-
- for i := range size {
- if err := oids[i].Decode(r.ReadVarBytes()); err != nil {
- return nil, err
- }
- }
- return oids, nil
-}
diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go
deleted file mode 100644
index 3bfe2608b..000000000
--- a/cmd/frostfs-lens/internal/schema/writecache/parsers.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package writecache
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/mr-tron/base58"
-)
-
-var WritecacheParser = common.WithFallback(
- DefaultBucketParser,
- common.RawParser.ToFallbackParser(),
-)
-
-func DefaultBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- if value != nil {
- return nil, nil, errors.New("not a bucket")
- }
- if !bytes.Equal(key, []byte{0}) {
- return nil, nil, errors.New("invalid key")
- }
- return &DefaultBucket{}, DefaultRecordParser, nil
-}
-
-func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
- parts := strings.Split(string(key), "/")
-
- if len(parts) != 2 {
- return nil, nil, errors.New("invalid key, expected address string /")
- }
-
- cnrRaw, err := base58.Decode(parts[0])
- if err != nil {
- return nil, nil, errors.New("can't decode CID string")
- }
- objRaw, err := base58.Decode(parts[1])
- if err != nil {
- return nil, nil, errors.New("can't decode OID string")
- }
-
- cnr := cid.ID{}
- if err := cnr.Decode(cnrRaw); err != nil {
- return nil, nil, fmt.Errorf("can't decode CID: %w", err)
- }
- obj := oid.ID{}
- if err := obj.Decode(objRaw); err != nil {
- return nil, nil, fmt.Errorf("can't decode OID: %w", err)
- }
-
- var r DefaultRecord
-
- r.addr.SetContainer(cnr)
- r.addr.SetObject(obj)
-
- r.data = value
-
- return &r, nil, nil
-}
diff --git a/cmd/frostfs-lens/internal/schema/writecache/types.go b/cmd/frostfs-lens/internal/schema/writecache/types.go
deleted file mode 100644
index 11e6f3fcd..000000000
--- a/cmd/frostfs-lens/internal/schema/writecache/types.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package writecache
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/davecgh/go-spew/spew"
- "github.com/gdamore/tcell/v2"
- "github.com/rivo/tview"
-)
-
-type (
- DefaultBucket struct{}
-
- DefaultRecord struct {
- addr oid.Address
- // data used for record dump.
- // nolint:unused
- data []byte
- }
-)
-
-func (b *DefaultBucket) String() string {
- return common.FormatSimple("0 Default", tcell.ColorLime)
-}
-
-func (r *DefaultRecord) String() string {
- return fmt.Sprintf(
- "CID %s OID %s %c Data {...}",
- common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
- common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
- tview.Borders.Vertical,
- )
-}
-
-func (b *DefaultBucket) DetailedString() string {
- return spew.Sdump(*b)
-}
-
-func (r *DefaultRecord) DetailedString() string {
- return spew.Sdump(*r)
-}
-
-func (b *DefaultBucket) Filter(typ string, _ any) common.FilterResult {
- switch typ {
- case "cid":
- return common.Maybe
- case "oid":
- return common.Maybe
- default:
- return common.No
- }
-}
-
-func (r *DefaultRecord) Filter(typ string, val any) common.FilterResult {
- switch typ {
- case "cid":
- id := val.(cid.ID)
- return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
- case "oid":
- id := val.(oid.ID)
- return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
- default:
- return common.No
- }
-}
diff --git a/cmd/frostfs-lens/internal/tui/buckets.go b/cmd/frostfs-lens/internal/tui/buckets.go
deleted file mode 100644
index 2d3b20792..000000000
--- a/cmd/frostfs-lens/internal/tui/buckets.go
+++ /dev/null
@@ -1,251 +0,0 @@
-package tui
-
-import (
- "context"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- "github.com/gdamore/tcell/v2"
- "github.com/rivo/tview"
-)
-
-type BucketsView struct {
- *tview.Box
-
- mu sync.Mutex
-
- view *tview.TreeView
- nodeToUpdate *tview.TreeNode
-
- ui *UI
- filter *Filter
-}
-
-type bucketNode struct {
- bucket *Bucket
- filter *Filter
-}
-
-func NewBucketsView(ui *UI, filter *Filter) *BucketsView {
- return &BucketsView{
- Box: tview.NewBox(),
- view: tview.NewTreeView(),
- ui: ui,
- filter: filter,
- }
-}
-
-func (v *BucketsView) Mount(_ context.Context) error {
- root := tview.NewTreeNode(".")
- root.SetExpanded(false)
- root.SetSelectable(false)
- root.SetReference(&bucketNode{
- bucket: &Bucket{NextParser: v.ui.rootParser},
- filter: v.filter,
- })
-
- v.nodeToUpdate = root
-
- v.view.SetRoot(root)
- v.view.SetCurrentNode(root)
-
- return nil
-}
-
-func (v *BucketsView) Update(ctx context.Context) error {
- if v.nodeToUpdate == nil {
- return nil
- }
- defer func() { v.nodeToUpdate = nil }()
-
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- ready := make(chan struct{})
- errCh := make(chan error)
-
- tmp := tview.NewTreeNode(v.nodeToUpdate.GetText())
- tmp.SetReference(v.nodeToUpdate.GetReference())
-
- node := v.nodeToUpdate.GetReference().(*bucketNode)
-
- go func() {
- defer close(ready)
-
- hasBuckets, err := HasBuckets(ctx, v.ui.db, node.bucket.Path)
- if err != nil {
- errCh <- err
- }
-
- // Show the selected bucket's records instead.
- if !hasBuckets && node.bucket.NextParser != nil {
- v.ui.moveNextPage(NewRecordsView(v.ui, node.bucket, node.filter))
- }
-
- if v.nodeToUpdate.IsExpanded() {
- return
- }
-
- err = v.loadNodeChildren(ctx, tmp, node.filter)
- if err != nil {
- errCh <- err
- }
- }()
-
- select {
- case <-ctx.Done():
- case <-ready:
- v.mu.Lock()
- v.nodeToUpdate.SetChildren(tmp.GetChildren())
- v.nodeToUpdate.SetExpanded(!v.nodeToUpdate.IsExpanded())
- v.mu.Unlock()
- case err := <-errCh:
- return err
- }
-
- return nil
-}
-
-func (v *BucketsView) Unmount() {
-}
-
-func (v *BucketsView) Draw(screen tcell.Screen) {
- x, y, width, height := v.GetInnerRect()
- v.view.SetRect(x, y, width, height)
-
- v.view.Draw(screen)
-}
-
-func (v *BucketsView) loadNodeChildren(
- ctx context.Context, node *tview.TreeNode, filter *Filter,
-) error {
- parentBucket := node.GetReference().(*bucketNode).bucket
-
- path := parentBucket.Path
- parser := parentBucket.NextParser
-
- buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
-
- for item := range buffer {
- if item.err != nil {
- return item.err
- }
- bucket := item.val
-
- var err error
- bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
- if err != nil {
- return err
- }
-
- satisfies, err := v.bucketSatisfiesFilter(ctx, bucket, filter)
- if err != nil {
- return err
- }
- if !satisfies {
- continue
- }
-
- child := tview.NewTreeNode(bucket.Entry.String()).
- SetSelectable(true).
- SetExpanded(false).
- SetReference(&bucketNode{
- bucket: bucket,
- filter: filter.Apply(bucket.Entry),
- })
-
- node.AddChild(child)
- }
-
- return nil
-}
-
-func (v *BucketsView) bucketSatisfiesFilter(
- ctx context.Context, bucket *Bucket, filter *Filter,
-) (bool, error) {
- // Does the current bucket satisfies the filter?
- filter = filter.Apply(bucket.Entry)
-
- if filter.Result() == common.Yes {
- return true, nil
- }
-
- if filter.Result() == common.No {
- return false, nil
- }
-
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- // Check the current bucket's nested buckets if exist
- bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
-
- for item := range bucketsBuffer {
- if item.err != nil {
- return false, item.err
- }
- b := item.val
-
- var err error
- b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
- if err != nil {
- return false, err
- }
-
- satisfies, err := v.bucketSatisfiesFilter(ctx, b, filter)
- if err != nil {
- return false, err
- }
- if satisfies {
- return true, nil
- }
- }
-
- // Check the current bucket's nested records if exist
- recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
-
- for item := range recordsBuffer {
- if item.err != nil {
- return false, item.err
- }
- r := item.val
-
- var err error
- r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
- if err != nil {
- return false, err
- }
-
- if filter.Apply(r.Entry).Result() == common.Yes {
- return true, nil
- }
- }
-
- return false, nil
-}
-
-func (v *BucketsView) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
- return v.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
- currentNode := v.view.GetCurrentNode()
- if currentNode == nil {
- return
- }
-
- switch event.Key() {
- case tcell.KeyEnter:
- // Expand or collapse the selected bucket's nested buckets,
- // otherwise, navigate to that bucket's records.
- v.nodeToUpdate = currentNode
- case tcell.KeyCtrlR:
- // Navigate to the selected bucket's records.
- bucketNode := currentNode.GetReference().(*bucketNode)
- v.ui.moveNextPage(NewRecordsView(v.ui, bucketNode.bucket, bucketNode.filter))
- case tcell.KeyCtrlD:
- // Navigate to the selected bucket's detailed view.
- bucketNode := currentNode.GetReference().(*bucketNode)
- v.ui.moveNextPage(NewDetailedView(bucketNode.bucket.Entry.DetailedString()))
- default:
- v.view.InputHandler()(event, func(tview.Primitive) {})
- }
- })
-}
diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go
deleted file mode 100644
index 94fa87f98..000000000
--- a/cmd/frostfs-lens/internal/tui/db.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package tui
-
-import (
- "context"
- "errors"
- "fmt"
-
- "go.etcd.io/bbolt"
-)
-
-type Item[T any] struct {
- val T
- err error
-}
-
-func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
- if len(path) == 0 {
- return nil, errors.New("can't find bucket without path")
- }
-
- name := path[0]
- bucket := tx.Bucket(name)
- if bucket == nil {
- return nil, fmt.Errorf("no bucket with name %s", name)
- }
- for _, name := range path[1:] {
- bucket = bucket.Bucket(name)
- if bucket == nil {
- return nil, fmt.Errorf("no bucket with name %s", name)
- }
- }
- return bucket, nil
-}
-
-func load[T any](
- ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
- filter func(key, value []byte) bool, transform func(key, value []byte) T,
-) <-chan Item[T] {
- buffer := make(chan Item[T], bufferSize)
-
- go func() {
- defer close(buffer)
-
- err := db.View(func(tx *bbolt.Tx) error {
- var cursor *bbolt.Cursor
- if len(path) == 0 {
- cursor = tx.Cursor()
- } else {
- bucket, err := resolvePath(tx, path)
- if err != nil {
- buffer <- Item[T]{err: fmt.Errorf("can't find bucket: %w", err)}
- return nil
- }
- cursor = bucket.Cursor()
- }
-
- key, value := cursor.First()
- for {
- if key == nil {
- return nil
- }
- if filter != nil && !filter(key, value) {
- key, value = cursor.Next()
- continue
- }
-
- select {
- case <-ctx.Done():
- return nil
- case buffer <- Item[T]{val: transform(key, value)}:
- key, value = cursor.Next()
- }
- }
- })
- if err != nil {
- buffer <- Item[T]{err: err}
- }
- }()
-
- return buffer
-}
-
-func LoadBuckets(
- ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
-) <-chan Item[*Bucket] {
- buffer := load(
- ctx, db, path, bufferSize,
- func(_, value []byte) bool {
- return value == nil
- },
- func(key, _ []byte) *Bucket {
- base := make([][]byte, 0, len(path))
- base = append(base, path...)
-
- return &Bucket{
- Name: key,
- Path: append(base, key),
- }
- },
- )
-
- return buffer
-}
-
-func LoadRecords(
- ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
-) <-chan Item[*Record] {
- buffer := load(
- ctx, db, path, bufferSize,
- func(_, value []byte) bool {
- return value != nil
- },
- func(key, value []byte) *Record {
- base := make([][]byte, 0, len(path))
- base = append(base, path...)
-
- return &Record{
- Key: key,
- Value: value,
- Path: append(base, key),
- }
- },
- )
-
- return buffer
-}
-
-// HasBuckets checks if a bucket has nested buckets. It relies on assumption
-// that a bucket can have either nested buckets or records but not both.
-func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error) {
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- buffer := load(
- ctx, db, path, 1,
- nil,
- func(_, value []byte) []byte { return value },
- )
-
- x, ok := <-buffer
- if !ok {
- return false, nil
- }
- if x.err != nil {
- return false, x.err
- }
- if x.val != nil {
- return false, nil
- }
- return true, nil
-}
diff --git a/cmd/frostfs-lens/internal/tui/detailed.go b/cmd/frostfs-lens/internal/tui/detailed.go
deleted file mode 100644
index b2d897230..000000000
--- a/cmd/frostfs-lens/internal/tui/detailed.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package tui
-
-import (
- "context"
-
- "github.com/rivo/tview"
-)
-
-type DetailedView struct {
- *tview.TextView
-}
-
-func NewDetailedView(detailed string) *DetailedView {
- v := &DetailedView{
- TextView: tview.NewTextView(),
- }
- v.SetDynamicColors(true)
- v.SetText(detailed)
- return v
-}
-
-func (v *DetailedView) Mount(_ context.Context) error { return nil }
-func (v *DetailedView) Update(_ context.Context) error { return nil }
-func (v *DetailedView) Unmount() {}
diff --git a/cmd/frostfs-lens/internal/tui/filter.go b/cmd/frostfs-lens/internal/tui/filter.go
deleted file mode 100644
index e7879eca7..000000000
--- a/cmd/frostfs-lens/internal/tui/filter.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package tui
-
-import (
- "maps"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
-)
-
-type Filter struct {
- values map[string]any
- results map[string]common.FilterResult
-}
-
-func NewFilter(values map[string]any) *Filter {
- f := &Filter{
- values: maps.Clone(values),
- results: make(map[string]common.FilterResult),
- }
- for tag := range values {
- f.results[tag] = common.No
- }
- return f
-}
-
-func (f *Filter) Apply(e common.SchemaEntry) *Filter {
- filter := &Filter{
- values: f.values,
- results: maps.Clone(f.results),
- }
-
- for tag, value := range filter.values {
- filter.results[tag] = max(filter.results[tag], e.Filter(tag, value))
- }
-
- return filter
-}
-
-func (f *Filter) Result() common.FilterResult {
- current := common.Yes
- for _, r := range f.results {
- current = min(r, current)
- }
- return current
-}
diff --git a/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt b/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt
deleted file mode 100644
index c371b34e9..000000000
--- a/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-[green::b]HOTKEYS[-::-]
-
- [green::b]Navigation[-::-]
-
- [yellow::b]Down Arrow[-::-] / [yellow::b]j[-::-]
- Scroll down.
-
- [yellow::b]Up Arrow[-::-] / [yellow::b]k[-::-]
- Scroll up.
-
- [yellow::b]Page Down[-::-] / [yellow::b]Ctrl-f[-::-]
- Scroll down by a full page.
-
- [yellow::b]Page Up[-::-] / [yellow::b]Ctrl-b[-::-]
- Scroll up by a full page.
-
- [green::b]Actions[-::-]
-
- [yellow::b]Enter[-::-]
- Perform actions based on the current context:
- - In Buckets View:
- - Expand/collapse the selected bucket to show/hide its nested buckets.
- - If no nested buckets exist, navigate to the selected bucket's records.
- - In Records View: Open the detailed view of the selected record.
-
- [yellow::b]Escape[-::-]
- Return to the previous page, opposite of [yellow::b]Enter[-::-].
-
- Refer to the [green::b]SEARCHING[-::-] section for more specific actions.
-
-
- [green::b]Alternative Action Hotkeys[-::-]
-
- [yellow::b]Ctrl-r[-::-]
- Directly navigate to the selected bucket's records.
-
- [yellow::b]Ctrl-d[-::-]
- Access the detailed view of the selected bucket.
diff --git a/cmd/frostfs-lens/internal/tui/help-pages/searching.txt b/cmd/frostfs-lens/internal/tui/help-pages/searching.txt
deleted file mode 100644
index bc2be512b..000000000
--- a/cmd/frostfs-lens/internal/tui/help-pages/searching.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-[green::b]SEARCHING[-::-]
-
- [green::b]Hotkeys[-::-]
-
- [yellow::b]/[-::-]
- Initiate the search prompt.
- - The prompt follows this syntax: [yellow::b]tag:value [+ tag:value]...[-::-]
- - Multiple filter can be combined with [yellow::b]+[-::-], the result is an intersection of those filters' result sets.
- - Any leading and trailing whitespace will be ignored.
- - An empty prompt will return all results with no filters applied.
- - Refer to the [green::b]Available Search Filters[-::-] section below for a list of valid filter tags.
-
- [yellow::b]Enter[-::-]
- Execute the search based on the entered prompt.
- - If the prompt is invalid, an error message will be displayed.
-
- [yellow::b]Escape[-::-]
- Exit the search prompt without performing a search.
-
- [yellow::b]Down Arrow[-::-], [yellow::b]Up Arrow[-::-]
- Scroll through the search history.
-
-
- [green::b]Available Search Filters[-::-]
-
-%s
diff --git a/cmd/frostfs-lens/internal/tui/help.go b/cmd/frostfs-lens/internal/tui/help.go
deleted file mode 100644
index 3ab8fede0..000000000
--- a/cmd/frostfs-lens/internal/tui/help.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package tui
-
-import (
- _ "embed"
- "fmt"
- "strings"
-
- "github.com/gdamore/tcell/v2"
- "github.com/rivo/tview"
-)
-
-var (
- //go:embed help-pages/hotkeys.txt
- hotkeysHelpText string
-
- //go:embed help-pages/searching.txt
- searchingHelpText string
-)
-
-type HelpPage struct {
- *tview.Box
- pages []*tview.TextView
- currentPage int
-
- filters []string
- filterHints map[string]string
-}
-
-func NewHelpPage(filters []string, hints map[string]string) *HelpPage {
- hp := &HelpPage{
- Box: tview.NewBox(),
- filters: filters,
- filterHints: hints,
- }
-
- page := tview.NewTextView().
- SetDynamicColors(true).
- SetText(hotkeysHelpText)
- hp.addPage(page)
-
- page = tview.NewTextView().
- SetDynamicColors(true).
- SetText(fmt.Sprintf(searchingHelpText, hp.getFiltersText()))
- hp.addPage(page)
-
- return hp
-}
-
-func (hp *HelpPage) addPage(page *tview.TextView) {
- hp.pages = append(hp.pages, page)
-}
-
-func (hp *HelpPage) getFiltersText() string {
- if len(hp.filters) == 0 {
- return "\t\tNo filters defined.\n"
- }
-
- filtersText := strings.Builder{}
- gapSize := 4
-
- tagMaxWidth := 3
- for _, filter := range hp.filters {
- tagMaxWidth = max(tagMaxWidth, len(filter))
- }
- filtersText.WriteString("\t\t[yellow::b]Tag")
- filtersText.WriteString(strings.Repeat(" ", gapSize))
- filtersText.WriteString("\tValue[-::-]\n\n")
-
- for _, filter := range hp.filters {
- filtersText.WriteString("\t\t")
- filtersText.WriteString(filter)
- filtersText.WriteString(strings.Repeat(" ", tagMaxWidth-len(filter)+gapSize))
- filtersText.WriteString(hp.filterHints[filter])
- filtersText.WriteRune('\n')
- }
-
- return filtersText.String()
-}
-
-func (hp *HelpPage) Draw(screen tcell.Screen) {
- x, y, width, height := hp.GetInnerRect()
- hp.pages[hp.currentPage].SetRect(x+1, y+1, width-2, height-2)
- hp.pages[hp.currentPage].Draw(screen)
-}
-
-func (hp *HelpPage) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
- return hp.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
- if event.Key() == tcell.KeyEnter {
- hp.currentPage++
- hp.currentPage %= len(hp.pages)
- return
- }
- hp.pages[hp.currentPage].InputHandler()(event, func(tview.Primitive) {})
- })
-}
-
-func (hp *HelpPage) MouseHandler() func(action tview.MouseAction, event *tcell.EventMouse, setFocus func(p tview.Primitive)) (consumed bool, capture tview.Primitive) {
- return hp.WrapMouseHandler(func(action tview.MouseAction, event *tcell.EventMouse, _ func(tview.Primitive)) (consumed bool, capture tview.Primitive) {
- return hp.pages[hp.currentPage].MouseHandler()(action, event, func(tview.Primitive) {})
- })
-}
diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go
deleted file mode 100644
index 471514e5d..000000000
--- a/cmd/frostfs-lens/internal/tui/input.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package tui
-
-import (
- "slices"
-
- "github.com/gdamore/tcell/v2"
- "github.com/rivo/tview"
-)
-
-type InputFieldWithHistory struct {
- *tview.InputField
- history []string
- historyLimit int
- historyPointer int
- currentContent string
-}
-
-func NewInputFieldWithHistory(historyLimit int) *InputFieldWithHistory {
- return &InputFieldWithHistory{
- InputField: tview.NewInputField(),
- historyLimit: historyLimit,
- }
-}
-
-func (f *InputFieldWithHistory) AddToHistory(s string) {
- // Stop scrolling history on history change, need to start scrolling again.
- defer func() { f.historyPointer = len(f.history) }()
-
- // Used history data for search prompt, so just make that data recent.
- if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
- f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
- f.history = append(f.history, s)
- }
-
- if len(f.history) == f.historyLimit {
- f.history = f.history[1:]
- }
- f.history = append(f.history, s)
-}
-
-func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
- return f.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
- switch event.Key() {
- case tcell.KeyDown:
- if len(f.history) == 0 {
- return
- }
- // Need to start iterating before.
- if f.historyPointer == len(f.history) {
- return
- }
- // Iterate to most recent prompts.
- f.historyPointer++
- // Stop iterating over history.
- if f.historyPointer == len(f.history) {
- f.SetText(f.currentContent)
- return
- }
- f.SetText(f.history[f.historyPointer])
- case tcell.KeyUp:
- if len(f.history) == 0 {
- return
- }
- // Start iterating over history.
- if f.historyPointer == len(f.history) {
- f.currentContent = f.GetText()
- }
- // End of history.
- if f.historyPointer == 0 {
- return
- }
- // Iterate to least recent prompts.
- f.historyPointer--
- f.SetText(f.history[f.historyPointer])
- default:
- f.InputField.InputHandler()(event, func(tview.Primitive) {})
- }
- })
-}
diff --git a/cmd/frostfs-lens/internal/tui/loading.go b/cmd/frostfs-lens/internal/tui/loading.go
deleted file mode 100644
index 4b9384ad4..000000000
--- a/cmd/frostfs-lens/internal/tui/loading.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package tui
-
-import (
- "context"
- "fmt"
- "sync/atomic"
- "time"
-
- "github.com/gdamore/tcell/v2"
- "github.com/rivo/tview"
-)
-
-type LoadingBar struct {
- *tview.Box
- view *tview.TextView
- secondsElapsed atomic.Int64
- needDrawFunc func()
- reset func()
-}
-
-func NewLoadingBar(needDrawFunc func()) *LoadingBar {
- b := &LoadingBar{
- Box: tview.NewBox(),
- view: tview.NewTextView(),
- needDrawFunc: needDrawFunc,
- }
- b.view.SetBackgroundColor(tview.Styles.PrimaryTextColor)
- b.view.SetTextColor(b.GetBackgroundColor())
-
- return b
-}
-
-func (b *LoadingBar) Start(ctx context.Context) {
- ctx, b.reset = context.WithCancel(ctx)
-
- go func() {
- ticker := time.NewTicker(1 * time.Second)
- defer ticker.Stop()
-
- b.secondsElapsed.Store(0)
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- b.secondsElapsed.Add(1)
- b.needDrawFunc()
- }
- }
- }()
-}
-
-func (b *LoadingBar) Stop() {
- b.reset()
-}
-
-func (b *LoadingBar) Draw(screen tcell.Screen) {
- seconds := b.secondsElapsed.Load()
-
- var time string
- switch {
- case seconds < 60:
- time = fmt.Sprintf("%ds", seconds)
- default:
- time = fmt.Sprintf("%dm%ds", seconds/60, seconds%60)
- }
- b.view.SetText(fmt.Sprintf(" Loading... %s (press Escape to cancel) ", time))
-
- x, y, width, _ := b.GetInnerRect()
- b.view.SetRect(x, y, width, 1)
- b.view.Draw(screen)
-}
diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go
deleted file mode 100644
index a4d392ab3..000000000
--- a/cmd/frostfs-lens/internal/tui/records.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package tui
-
-import (
- "context"
- "errors"
- "fmt"
- "math"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
- "github.com/gdamore/tcell/v2"
- "github.com/rivo/tview"
-)
-
-type updateType int
-
-const (
- other updateType = iota
- moveToPrevPage
- moveToNextPage
- moveUp
- moveDown
- moveHome
- moveEnd
-)
-
-type RecordsView struct {
- *tview.Box
-
- mu sync.RWMutex
-
- onUnmount func()
-
- bucket *Bucket
- records []*Record
-
- buffer chan *Record
-
- firstRecordIndex int
- lastRecordIndex int
- selectedRecordIndex int
-
- updateType updateType
-
- ui *UI
- filter *Filter
-}
-
-func NewRecordsView(ui *UI, bucket *Bucket, filter *Filter) *RecordsView {
- return &RecordsView{
- Box: tview.NewBox(),
- bucket: bucket,
- ui: ui,
- filter: filter,
- }
-}
-
-func (v *RecordsView) Mount(ctx context.Context) error {
- if v.onUnmount != nil {
- return errors.New("try to mount already mounted component")
- }
-
- ctx, v.onUnmount = context.WithCancel(ctx)
-
- tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
-
- v.buffer = make(chan *Record, v.ui.loadBufferSize)
- go func() {
- defer close(v.buffer)
-
- for item := range tempBuffer {
- if item.err != nil {
- v.ui.stopOnError(item.err)
- break
- }
- record := item.val
-
- var err error
- record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
- if err != nil {
- v.ui.stopOnError(err)
- break
- }
-
- if v.filter.Apply(record.Entry).Result() != common.Yes {
- continue
- }
-
- v.buffer <- record
- }
- }()
-
- return nil
-}
-
-func (v *RecordsView) Unmount() {
- assert.False(v.onUnmount == nil, "try to unmount not mounted component")
- v.onUnmount()
- v.onUnmount = nil
-}
-
-func (v *RecordsView) Update(ctx context.Context) error {
- _, _, _, recordsPerPage := v.GetInnerRect()
- firstRecordIndex, lastRecordIndex, selectedRecordIndex := v.getNewIndexes()
-
-loop:
- for len(v.records) < lastRecordIndex {
- select {
- case <-ctx.Done():
- return nil
- case record, ok := <-v.buffer:
- if !ok {
- break loop
- }
- v.records = append(v.records, record)
- }
- }
-
- // Set the update type to its default value after some specific key event
- // has been handled.
- v.updateType = other
-
- firstRecordIndex = max(0, min(firstRecordIndex, len(v.records)-recordsPerPage))
- lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records))
- selectedRecordIndex = min(selectedRecordIndex, lastRecordIndex-1)
-
- v.mu.Lock()
- v.firstRecordIndex = firstRecordIndex
- v.lastRecordIndex = lastRecordIndex
- v.selectedRecordIndex = selectedRecordIndex
- v.mu.Unlock()
-
- return nil
-}
-
-func (v *RecordsView) getNewIndexes() (int, int, int) {
- v.mu.RLock()
- firstRecordIndex := v.firstRecordIndex
- lastRecordIndex := v.lastRecordIndex
- selectedRecordIndex := v.selectedRecordIndex
- v.mu.RUnlock()
-
- _, _, _, recordsPerPage := v.GetInnerRect()
-
- switch v.updateType {
- case moveUp:
- if selectedRecordIndex != firstRecordIndex {
- selectedRecordIndex--
- break
- }
- firstRecordIndex = max(0, firstRecordIndex-1)
- lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records))
- selectedRecordIndex = firstRecordIndex
- case moveToPrevPage:
- if selectedRecordIndex != firstRecordIndex {
- selectedRecordIndex = firstRecordIndex
- break
- }
- firstRecordIndex = max(0, firstRecordIndex-recordsPerPage)
- lastRecordIndex = firstRecordIndex + recordsPerPage
- selectedRecordIndex = firstRecordIndex
- case moveDown:
- if selectedRecordIndex != lastRecordIndex-1 {
- selectedRecordIndex++
- break
- }
- firstRecordIndex++
- lastRecordIndex++
- selectedRecordIndex++
- case moveToNextPage:
- if selectedRecordIndex != lastRecordIndex-1 {
- selectedRecordIndex = lastRecordIndex - 1
- break
- }
- firstRecordIndex += recordsPerPage
- lastRecordIndex = firstRecordIndex + recordsPerPage
- selectedRecordIndex = lastRecordIndex - 1
- case moveHome:
- firstRecordIndex = 0
- lastRecordIndex = firstRecordIndex + recordsPerPage
- selectedRecordIndex = 0
- case moveEnd:
- lastRecordIndex = math.MaxInt32
- firstRecordIndex = lastRecordIndex - recordsPerPage
- selectedRecordIndex = lastRecordIndex - 1
- default:
- lastRecordIndex = firstRecordIndex + recordsPerPage
- }
-
- return firstRecordIndex, lastRecordIndex, selectedRecordIndex
-}
-
-func (v *RecordsView) GetInnerRect() (int, int, int, int) {
- x, y, width, height := v.Box.GetInnerRect()
-
- // Left padding.
- x = min(x+3, x+width-1)
- width = max(width-3, 0)
-
- return x, y, width, height
-}
-
-func (v *RecordsView) Draw(screen tcell.Screen) {
- v.mu.RLock()
- firstRecordIndex := v.firstRecordIndex
- lastRecordIndex := v.lastRecordIndex
- selectedRecordIndex := v.selectedRecordIndex
- records := v.records
- v.mu.RUnlock()
-
- v.DrawForSubclass(screen, v)
-
- x, y, width, height := v.GetInnerRect()
- if height == 0 {
- return
- }
-
- // No records in that bucket.
- if firstRecordIndex == lastRecordIndex {
- tview.Print(
- screen, "Empty Bucket", x, y, width, tview.AlignCenter, tview.Styles.PrimaryTextColor,
- )
- return
- }
-
- for index := firstRecordIndex; index < lastRecordIndex; index++ {
- result := records[index].Entry
- text := result.String()
-
- if index == selectedRecordIndex {
- text = fmt.Sprintf("[:white]%s[:-]", text)
- tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimitiveBackgroundColor)
- } else {
- tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimaryTextColor)
- }
-
- y++
- }
-}
-
-func (v *RecordsView) InputHandler() func(event *tcell.EventKey, _ func(p tview.Primitive)) {
- return v.WrapInputHandler(func(event *tcell.EventKey, _ func(p tview.Primitive)) {
- switch m, k := event.Modifiers(), event.Key(); {
- case m == 0 && k == tcell.KeyPgUp:
- v.updateType = moveToPrevPage
- case m == 0 && k == tcell.KeyPgDn:
- v.updateType = moveToNextPage
- case m == 0 && k == tcell.KeyUp:
- v.updateType = moveUp
- case m == 0 && k == tcell.KeyDown:
- v.updateType = moveDown
- case m == 0 && k == tcell.KeyHome:
- v.updateType = moveHome
- case m == 0 && k == tcell.KeyEnd:
- v.updateType = moveEnd
- case k == tcell.KeyEnter:
- v.mu.RLock()
- selectedRecordIndex := v.selectedRecordIndex
- records := v.records
- v.mu.RUnlock()
- if len(records) != 0 {
- current := records[selectedRecordIndex]
- v.ui.moveNextPage(NewDetailedView(current.Entry.DetailedString()))
- }
- }
- })
-}
diff --git a/cmd/frostfs-lens/internal/tui/types.go b/cmd/frostfs-lens/internal/tui/types.go
deleted file mode 100644
index 4a227fe64..000000000
--- a/cmd/frostfs-lens/internal/tui/types.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package tui
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
-)
-
-type Bucket struct {
- Name []byte
- Path [][]byte
- Entry common.SchemaEntry
- NextParser common.Parser
-}
-
-type Record struct {
- Key, Value []byte
- Path [][]byte
- Entry common.SchemaEntry
-}
diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go
deleted file mode 100644
index cc6b7859e..000000000
--- a/cmd/frostfs-lens/internal/tui/ui.go
+++ /dev/null
@@ -1,561 +0,0 @@
-package tui
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync/atomic"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
- "github.com/davecgh/go-spew/spew"
- "github.com/gdamore/tcell/v2"
- "github.com/rivo/tview"
- "go.etcd.io/bbolt"
-)
-
-type Config struct {
- LoadBufferSize int
- SearchHistorySize int
- LoadingIndicatorLag time.Duration
-}
-
-var DefaultConfig = Config{
- LoadBufferSize: 100,
- SearchHistorySize: 100,
- LoadingIndicatorLag: 500 * time.Millisecond,
-}
-
-type Primitive interface {
- tview.Primitive
-
- Mount(ctx context.Context) error
- Update(ctx context.Context) error
- Unmount()
-}
-
-type UI struct {
- *tview.Box
-
- // Need to use context while updating pages those read data from a database.
- // Context should be shared among all mount and updates. Current TUI library
- // doesn't use contexts at all, so I do that feature by myself.
- //nolint:containedctx
- ctx context.Context
- onStop func()
-
- app *tview.Application
- db *bbolt.DB
-
- pageHistory []Primitive
- mountedPage Primitive
-
- pageToMount Primitive
-
- pageStub tview.Primitive
-
- infoBar *tview.TextView
- searchBar *InputFieldWithHistory
- loadingBar *LoadingBar
- helpBar *tview.TextView
-
- helpPage *HelpPage
-
- searchErrorBar *tview.TextView
-
- isSearching bool
- isLoading atomic.Bool
- isShowingError bool
- isShowingHelp bool
-
- loadBufferSize int
-
- rootParser common.Parser
-
- loadingIndicatorLag time.Duration
-
- cancelLoading func()
-
- filters map[string]func(string) (any, error)
- compositeFilters map[string]func(string) (map[string]any, error)
- filterHints map[string]string
-}
-
-func NewUI(
- ctx context.Context,
- app *tview.Application,
- db *bbolt.DB,
- rootParser common.Parser,
- cfg *Config,
-) *UI {
- spew.Config.DisableMethods = true
-
- if cfg == nil {
- cfg = &DefaultConfig
- }
-
- ui := &UI{
- Box: tview.NewBox(),
-
- app: app,
- db: db,
- rootParser: rootParser,
-
- filters: make(map[string]func(string) (any, error)),
- compositeFilters: make(map[string]func(string) (map[string]any, error)),
- filterHints: make(map[string]string),
-
- loadBufferSize: cfg.LoadBufferSize,
- loadingIndicatorLag: cfg.LoadingIndicatorLag,
- }
-
- ui.ctx, ui.onStop = context.WithCancel(ctx)
-
- backgroundColor := ui.GetBackgroundColor()
- textColor := tview.Styles.PrimaryTextColor
-
- inverseBackgroundColor := textColor
- inverseTextColor := backgroundColor
-
- alertTextColor := tcell.ColorRed
-
- ui.pageStub = tview.NewBox()
-
- ui.infoBar = tview.NewTextView()
- ui.infoBar.SetBackgroundColor(inverseBackgroundColor)
- ui.infoBar.SetTextColor(inverseTextColor)
- ui.infoBar.SetText(
- fmt.Sprintf(" %s (press h for help, q to quit) ", db.Path()),
- )
-
- ui.searchBar = NewInputFieldWithHistory(cfg.SearchHistorySize)
- ui.searchBar.SetFieldBackgroundColor(backgroundColor)
- ui.searchBar.SetFieldTextColor(textColor)
- ui.searchBar.SetLabelColor(textColor)
- ui.searchBar.Focus(nil)
- ui.searchBar.SetLabel("/")
-
- ui.searchErrorBar = tview.NewTextView()
- ui.searchErrorBar.SetBackgroundColor(backgroundColor)
- ui.searchErrorBar.SetTextColor(alertTextColor)
-
- ui.helpBar = tview.NewTextView()
- ui.helpBar.SetBackgroundColor(inverseBackgroundColor)
- ui.helpBar.SetTextColor(inverseTextColor)
- ui.helpBar.SetText(" Press Enter for next page or Escape to exit help ")
-
- ui.loadingBar = NewLoadingBar(ui.triggerDraw)
-
- ui.pageToMount = NewBucketsView(ui, NewFilter(nil))
-
- return ui
-}
-
-func (ui *UI) checkFilterExists(typ string) bool {
- if _, ok := ui.filters[typ]; ok {
- return true
- }
- if _, ok := ui.compositeFilters[typ]; ok {
- return true
- }
- return false
-}
-
-func (ui *UI) AddFilter(
- typ string,
- parser func(string) (any, error),
- helpHint string,
-) error {
- if ui.checkFilterExists(typ) {
- return fmt.Errorf("filter %s already exists", typ)
- }
- ui.filters[typ] = parser
- ui.filterHints[typ] = helpHint
- return nil
-}
-
-func (ui *UI) AddCompositeFilter(
- typ string,
- parser func(string) (map[string]any, error),
- helpHint string,
-) error {
- if ui.checkFilterExists(typ) {
- return fmt.Errorf("filter %s already exists", typ)
- }
- ui.compositeFilters[typ] = parser
- ui.filterHints[typ] = helpHint
- return nil
-}
-
-func (ui *UI) stopOnError(err error) {
- if err != nil {
- ui.onStop()
- ui.app.QueueEvent(tcell.NewEventError(err))
- }
-}
-
-func (ui *UI) stop() {
- ui.onStop()
- ui.app.Stop()
-}
-
-func (ui *UI) movePrevPage() {
- if len(ui.pageHistory) != 0 {
- ui.mountedPage.Unmount()
- ui.mountedPage = ui.pageHistory[len(ui.pageHistory)-1]
- ui.pageHistory = ui.pageHistory[:len(ui.pageHistory)-1]
- ui.triggerDraw()
- }
-}
-
-func (ui *UI) moveNextPage(page Primitive) {
- ui.pageToMount = page
- ui.triggerDraw()
-}
-
-func (ui *UI) triggerDraw() {
- go ui.app.QueueUpdateDraw(func() {})
-}
-
-func (ui *UI) Draw(screen tcell.Screen) {
- if ui.isLoading.Load() {
- ui.draw(screen)
- return
- }
-
- ui.isLoading.Store(true)
-
- ctx, cancel := context.WithCancel(ui.ctx)
-
- ready := make(chan struct{})
- go func() {
- ui.load(ctx)
-
- cancel()
- close(ready)
- ui.isLoading.Store(false)
- }()
-
- select {
- case <-ready:
- case <-time.After(ui.loadingIndicatorLag):
- ui.loadingBar.Start(ui.ctx)
- ui.cancelLoading = cancel
-
- go func() {
- <-ready
- ui.loadingBar.Stop()
- ui.triggerDraw()
- }()
- }
-
- ui.draw(screen)
-}
-
-func (ui *UI) load(ctx context.Context) {
- if ui.mountedPage == nil && ui.pageToMount == nil {
- ui.stop()
- return
- }
-
- if ui.pageToMount != nil {
- ui.mountAndUpdate(ctx)
- } else {
- ui.update(ctx)
- }
-}
-
-func (ui *UI) draw(screen tcell.Screen) {
- ui.DrawForSubclass(screen, ui)
- x, y, width, height := ui.GetInnerRect()
-
- var (
- pageToDraw tview.Primitive
- barToDraw tview.Primitive
- )
-
- switch {
- case ui.isShowingHelp:
- if ui.helpPage == nil {
- var filters []string
- for f := range ui.filters {
- filters = append(filters, f)
- }
- for f := range ui.compositeFilters {
- filters = append(filters, f)
- }
- ui.helpPage = NewHelpPage(filters, ui.filterHints)
- }
- pageToDraw = ui.helpPage
- case ui.mountedPage != nil:
- pageToDraw = ui.mountedPage
- default:
- pageToDraw = ui.pageStub
- }
-
- pageToDraw.SetRect(x, y, width, height-1)
- pageToDraw.Draw(screen)
-
- // Search bar uses cursor and we need to hide it when another bar is drawn.
- screen.HideCursor()
-
- switch {
- case ui.isLoading.Load():
- barToDraw = ui.loadingBar
- case ui.isSearching:
- barToDraw = ui.searchBar
- case ui.isShowingError:
- barToDraw = ui.searchErrorBar
- case ui.isShowingHelp:
- barToDraw = ui.helpBar
- default:
- barToDraw = ui.infoBar
- }
-
- barToDraw.SetRect(x, y+height-1, width, 1)
- barToDraw.Draw(screen)
-}
-
-func (ui *UI) mountAndUpdate(ctx context.Context) {
- defer func() {
- // Operation succeeded or was canceled, either way reset page to mount.
- ui.pageToMount = nil
- }()
-
- // Mount should use app global context.
- //nolint:contextcheck
- err := ui.pageToMount.Mount(ui.ctx)
- if err != nil {
- ui.stopOnError(err)
- return
- }
-
- x, y, width, height := ui.GetInnerRect()
- ui.pageToMount.SetRect(x, y, width, height-1)
-
- s := loadOp(ctx, ui.pageToMount.Update)
- if s.err != nil {
- ui.pageToMount.Unmount()
- ui.stopOnError(s.err)
- return
- }
- // Update was canceled.
- if !s.done {
- ui.pageToMount.Unmount()
- return
- }
-
- if ui.mountedPage != nil {
- ui.pageHistory = append(ui.pageHistory, ui.mountedPage)
- }
- ui.mountedPage = ui.pageToMount
-}
-
-func (ui *UI) update(ctx context.Context) {
- x, y, width, height := ui.GetInnerRect()
- ui.mountedPage.SetRect(x, y, width, height-1)
-
- s := loadOp(ctx, ui.mountedPage.Update)
- if s.err != nil {
- ui.stopOnError(s.err)
- return
- }
-}
-
-type status struct {
- done bool
- err error
-}
-
-func loadOp(ctx context.Context, op func(ctx context.Context) error) status {
- errCh := make(chan error)
- go func() {
- errCh <- op(ctx)
- }()
-
- select {
- case <-ctx.Done():
- return status{done: false, err: nil}
- case err := <-errCh:
- return status{done: true, err: err}
- }
-}
-
-func (ui *UI) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
- return ui.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
- switch {
- case ui.isLoading.Load():
- ui.handleInputOnLoading(event)
- case ui.isShowingHelp:
- ui.handleInputOnShowingHelp(event)
- case ui.isShowingError:
- ui.handleInputOnShowingError()
- case ui.isSearching:
- ui.handleInputOnSearching(event)
- default:
- ui.handleInput(event)
- }
- })
-}
-
-func (ui *UI) handleInput(event *tcell.EventKey) {
- m, k, r := event.Modifiers(), event.Key(), event.Rune()
-
- switch {
- case k == tcell.KeyEsc:
- ui.movePrevPage()
- case m == 0 && k == tcell.KeyRune && r == 'h':
- ui.isShowingHelp = true
- case m == 0 && k == tcell.KeyRune && r == '/':
- ui.isSearching = true
- case m == 0 && k == tcell.KeyRune && r == 'q':
- ui.stop()
- default:
- if ui.mountedPage != nil {
- ui.mountedPage.InputHandler()(event, func(tview.Primitive) {})
- }
- }
-}
-
-func (ui *UI) handleInputOnLoading(event *tcell.EventKey) {
- switch k, r := event.Key(), event.Rune(); {
- case k == tcell.KeyEsc:
- ui.cancelLoading()
- case k == tcell.KeyRune && r == 'q':
- ui.stop()
- }
-}
-
-func (ui *UI) handleInputOnShowingError() {
- ui.isShowingError = false
- ui.isSearching = true
-}
-
-func (ui *UI) handleInputOnShowingHelp(event *tcell.EventKey) {
- k, r := event.Key(), event.Rune()
-
- switch {
- case k == tcell.KeyEsc:
- ui.isShowingHelp = false
- case k == tcell.KeyRune && r == 'q':
- ui.stop()
- default:
- ui.helpPage.InputHandler()(event, func(tview.Primitive) {})
- }
-}
-
-func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
- m, k := event.Modifiers(), event.Key()
-
- switch {
- case k == tcell.KeyEnter:
- prompt := ui.searchBar.GetText()
-
- res, err := ui.processPrompt(prompt)
- if err != nil {
- ui.isShowingError = true
- ui.isSearching = false
- ui.searchErrorBar.SetText(err.Error() + " (press any key to continue)")
- return
- }
-
- switch v := ui.mountedPage.(type) {
- case *BucketsView:
- ui.moveNextPage(NewBucketsView(ui, res))
- case *RecordsView:
- bucket := v.bucket
- ui.moveNextPage(NewRecordsView(ui, bucket, res))
- }
-
- if ui.searchBar.GetText() != "" {
- ui.searchBar.AddToHistory(ui.searchBar.GetText())
- }
-
- ui.searchBar.SetText("")
- ui.isSearching = false
- case k == tcell.KeyEsc:
- ui.isSearching = false
- case (k == tcell.KeyBackspace2 || m&tcell.ModCtrl != 0 && k == tcell.KeyETB) && len(ui.searchBar.GetText()) == 0:
- ui.isSearching = false
- default:
- ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
- }
-
- ui.MouseHandler()
-}
-
-func (ui *UI) WithPrompt(prompt string) error {
- filter, err := ui.processPrompt(prompt)
- if err != nil {
- return err
- }
-
- ui.pageToMount = NewBucketsView(ui, filter)
-
- if prompt != "" {
- ui.searchBar.AddToHistory(prompt)
- }
-
- return nil
-}
-
-func (ui *UI) processPrompt(prompt string) (filter *Filter, err error) {
- if prompt == "" {
- return NewFilter(nil), nil
- }
-
- filterMap := make(map[string]any)
-
- for _, filterString := range strings.Split(prompt, "+") {
- parts := strings.Split(filterString, ":")
- if len(parts) != 2 {
- return nil, errors.New("expected 'tag:value [+ tag:value]...'")
- }
-
- filterTag := strings.TrimSpace(parts[0])
- filterValueString := strings.TrimSpace(parts[1])
-
- if _, exists := filterMap[filterTag]; exists {
- return nil, fmt.Errorf("duplicate filter tag '%s'", filterTag)
- }
-
- parser, ok := ui.filters[filterTag]
- if ok {
- filterValue, err := parser(filterValueString)
- if err != nil {
- return nil, fmt.Errorf("can't parse '%s' filter value: %w", filterTag, err)
- }
-
- filterMap[filterTag] = filterValue
- continue
- }
-
- compositeParser, ok := ui.compositeFilters[filterTag]
- if ok {
- compositeFilterValue, err := compositeParser(filterValueString)
- if err != nil {
- return nil, fmt.Errorf(
- "can't parse '%s' filter value '%s': %w",
- filterTag, filterValueString, err,
- )
- }
-
- for tag, value := range compositeFilterValue {
- if _, exists := filterMap[tag]; exists {
- return nil, fmt.Errorf(
- "found duplicate filter tag '%s' while processing composite filter with tag '%s'",
- tag, filterTag,
- )
- }
-
- filterMap[tag] = value
- }
- continue
- }
-
- return nil, fmt.Errorf("unknown filter tag '%s'", filterTag)
- }
-
- return NewFilter(filterMap), nil
-}
diff --git a/cmd/frostfs-lens/internal/tui/util.go b/cmd/frostfs-lens/internal/tui/util.go
deleted file mode 100644
index 2d1ab3e33..000000000
--- a/cmd/frostfs-lens/internal/tui/util.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package tui
-
-import (
- "errors"
- "strings"
- "time"
-
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/mr-tron/base58"
- "go.etcd.io/bbolt"
-)
-
-func OpenDB(path string, writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(path, 0o600, &bbolt.Options{
- ReadOnly: !writable,
- Timeout: 100 * time.Millisecond,
- })
- if err != nil {
- return nil, err
- }
- return db, nil
-}
-
-func CIDParser(s string) (any, error) {
- data, err := base58.Decode(s)
- if err != nil {
- return nil, err
- }
- var id cid.ID
- if err = id.Decode(data); err != nil {
- return nil, err
- }
- return id, nil
-}
-
-func OIDParser(s string) (any, error) {
- data, err := base58.Decode(s)
- if err != nil {
- return nil, err
- }
- var id oid.ID
- if err = id.Decode(data); err != nil {
- return nil, err
- }
- return id, nil
-}
-
-func AddressParser(s string) (map[string]any, error) {
- m := make(map[string]any)
-
- parts := strings.Split(s, "/")
- if len(parts) != 2 {
- return nil, errors.New("expected /")
- }
- cnr, err := CIDParser(parts[0])
- if err != nil {
- return nil, err
- }
- obj, err := OIDParser(parts[1])
- if err != nil {
- return nil, err
- }
-
- m["cid"] = cnr
- m["oid"] = obj
-
- return m, nil
-}
-
-func keyParser(s string) (any, error) {
- if s == "" {
- return nil, errors.New("empty attribute key")
- }
- return s, nil
-}
-
-func valueParser(s string) (any, error) {
- if s == "" {
- return nil, errors.New("empty attribute value")
- }
- return s, nil
-}
-
-func AttributeParser(s string) (map[string]any, error) {
- m := make(map[string]any)
-
- parts := strings.Split(s, "/")
- if len(parts) != 1 && len(parts) != 2 {
- return nil, errors.New("expected or /")
- }
-
- key, err := keyParser(parts[0])
- if err != nil {
- return nil, err
- }
- m["key"] = key
-
- if len(parts) == 1 {
- return m, nil
- }
-
- value, err := valueParser(parts[1])
- if err != nil {
- return nil, err
- }
- m["value"] = value
-
- return m, nil
-}
diff --git a/cmd/frostfs-lens/internal/writecache/inspect.go b/cmd/frostfs-lens/internal/writecache/inspect.go
index afc986c8b..1a733513b 100644
--- a/cmd/frostfs-lens/internal/writecache/inspect.go
+++ b/cmd/frostfs-lens/internal/writecache/inspect.go
@@ -1,10 +1,13 @@
package writecache
import (
+ "fmt"
"os"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/spf13/cobra"
)
@@ -20,17 +23,34 @@ func init() {
common.AddAddressFlag(inspectCMD, &vAddress)
common.AddComponentPathFlag(inspectCMD, &vPath)
common.AddOutputFileFlag(inspectCMD, &vOut)
+ common.AddDBTypeFlag(inspectCMD, &vDBType)
}
func inspectFunc(cmd *cobra.Command, _ []string) {
var data []byte
- db, err := writecache.OpenDB(vPath, true, os.OpenFile)
- common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
- defer db.Close()
+ switch vDBType {
+ case "bbolt":
+ db, err := writecachebbolt.OpenDB(vPath, true, os.OpenFile)
+ common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
+ defer db.Close()
- data, err = writecache.Get(db, []byte(vAddress))
- common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err))
+ data, err = writecachebbolt.Get(db, []byte(vAddress))
+ common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err))
+
+ case "badger":
+ log, err := logger.NewLogger(&logger.Prm{})
+ common.ExitOnErr(cmd, common.Errf("could not create logger: %w", err))
+
+ db, err := writecachebadger.OpenDB(vPath, true, log)
+ common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
+
+ data, err = writecachebadger.Get(db, []byte(vAddress))
+ common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err))
+
+ default:
+ common.ExitOnErr(cmd, fmt.Errorf("invalid dbtype: %q (possible values: bbolt, badger)", vDBType))
+ }
var o objectSDK.Object
common.ExitOnErr(cmd, common.Errf("could not unmarshal object: %w", o.Unmarshal(data)))
diff --git a/cmd/frostfs-lens/internal/writecache/list.go b/cmd/frostfs-lens/internal/writecache/list.go
index bcbae0ec9..df02a82f7 100644
--- a/cmd/frostfs-lens/internal/writecache/list.go
+++ b/cmd/frostfs-lens/internal/writecache/list.go
@@ -6,7 +6,9 @@ import (
"os"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
@@ -31,10 +33,26 @@ func listFunc(cmd *cobra.Command, _ []string) {
return err
}
- db, err := writecache.OpenDB(vPath, true, os.OpenFile)
- common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
- defer db.Close()
+ switch vDBType {
+ case "bbolt":
+ db, err := writecachebbolt.OpenDB(vPath, true, os.OpenFile)
+ common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
+ defer db.Close()
- err = writecache.IterateDB(db, wAddr)
- common.ExitOnErr(cmd, common.Errf("write-cache iterator failure: %w", err))
+ err = writecachebbolt.IterateDB(db, wAddr)
+ common.ExitOnErr(cmd, common.Errf("write-cache iterator failure: %w", err))
+
+ case "badger":
+ log, err := logger.NewLogger(&logger.Prm{})
+ common.ExitOnErr(cmd, common.Errf("could not create logger: %w", err))
+
+ db, err := writecachebadger.OpenDB(vPath, true, log)
+ common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
+
+ err = writecachebadger.IterateDB(db, wAddr)
+ common.ExitOnErr(cmd, common.Errf("write-cache iterator failure: %w", err))
+
+ default:
+ common.ExitOnErr(cmd, fmt.Errorf("invalid dbtype: %q (possible values: bbolt, badger)", vDBType))
+ }
}
diff --git a/cmd/frostfs-lens/internal/writecache/root.go b/cmd/frostfs-lens/internal/writecache/root.go
index d7d6db240..11a8bb96b 100644
--- a/cmd/frostfs-lens/internal/writecache/root.go
+++ b/cmd/frostfs-lens/internal/writecache/root.go
@@ -8,6 +8,7 @@ var (
vAddress string
vPath string
vOut string
+ vDBType string
)
// Root contains `write-cache` command definition.
@@ -17,5 +18,5 @@ var Root = &cobra.Command{
}
func init() {
- Root.AddCommand(listCMD, inspectCMD, tuiCMD)
+ Root.AddCommand(listCMD, inspectCMD)
}
diff --git a/cmd/frostfs-lens/internal/writecache/tui.go b/cmd/frostfs-lens/internal/writecache/tui.go
deleted file mode 100644
index b7e4d7c96..000000000
--- a/cmd/frostfs-lens/internal/writecache/tui.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package writecache
-
-import (
- "context"
- "fmt"
-
- common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
- schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
- "github.com/rivo/tview"
- "github.com/spf13/cobra"
-)
-
-var tuiCMD = &cobra.Command{
- Use: "explore",
- Short: "Write cache exploration with a terminal UI",
- Long: `Launch a terminal UI to explore write cache and search for data.
-
-Available search filters:
-- cid CID
-- oid OID
-- addr CID/OID
-`,
- Run: tuiFunc,
-}
-
-var initialPrompt string
-
-func init() {
- common.AddComponentPathFlag(tuiCMD, &vPath)
-
- tuiCMD.Flags().StringVar(
- &initialPrompt,
- "filter",
- "",
- "Filter prompt to start with, format 'tag:value [+ tag:value]...'",
- )
-}
-
-func tuiFunc(cmd *cobra.Command, _ []string) {
- common.ExitOnErr(cmd, runTUI(cmd))
-}
-
-func runTUI(cmd *cobra.Command) error {
- db, err := tui.OpenDB(vPath, false)
- if err != nil {
- return fmt.Errorf("couldn't open database: %w", err)
- }
- defer db.Close()
-
- ctx, cancel := context.WithCancel(cmd.Context())
- defer cancel()
-
- app := tview.NewApplication()
- ui := tui.NewUI(ctx, app, db, schema.WritecacheParser, nil)
-
- _ = ui.AddFilter("cid", tui.CIDParser, "CID")
- _ = ui.AddFilter("oid", tui.OIDParser, "OID")
- _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
-
- err = ui.WithPrompt(initialPrompt)
- if err != nil {
- return fmt.Errorf("invalid filter prompt: %w", err)
- }
-
- app.SetRoot(ui, true).SetFocus(ui)
- return app.Run()
-}
diff --git a/cmd/frostfs-node/accounting.go b/cmd/frostfs-node/accounting.go
index 2d52e0c56..d04f34ff1 100644
--- a/cmd/frostfs-node/accounting.go
+++ b/cmd/frostfs-node/accounting.go
@@ -2,19 +2,18 @@ package main
import (
"context"
- "net"
- "strings"
+ accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
accountingTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/accounting/grpc"
accountingService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
accounting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting/morph"
- accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc"
- "google.golang.org/grpc"
)
func initAccountingService(ctx context.Context, c *cfg) {
- c.initMorphComponents(ctx)
+ if c.cfgMorph.client == nil {
+ initMorphComponents(ctx, c)
+ }
balanceMorphWrapper, err := balance.NewFromMorph(c.cfgMorph.client, c.cfgAccounting.scriptHash, 0)
fatalOnErr(err)
@@ -29,29 +28,7 @@ func initAccountingService(ctx context.Context, c *cfg) {
),
)
- c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
- accountingGRPC.RegisterAccountingServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(accountingGRPC.AccountingService_ServiceDesc), server)
- })
-}
-
-// frostFSServiceDesc creates a service descriptor with the new namespace for dual service support.
-func frostFSServiceDesc(sd grpc.ServiceDesc) *grpc.ServiceDesc {
- sdLegacy := new(grpc.ServiceDesc)
- *sdLegacy = sd
-
- const (
- legacyNamespace = "neo.fs.v2"
- apemanagerLegacyNamespace = "frostfs.v2"
- newNamespace = "frost.fs"
- )
-
- if strings.HasPrefix(sd.ServiceName, legacyNamespace) {
- sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, legacyNamespace, newNamespace)
- } else if strings.HasPrefix(sd.ServiceName, apemanagerLegacyNamespace) {
- sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, apemanagerLegacyNamespace, newNamespace)
+ for _, srv := range c.cfgGRPC.servers {
+ accountingGRPC.RegisterAccountingServiceServer(srv, server)
}
- return sdLegacy
}
diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go
deleted file mode 100644
index 513314712..000000000
--- a/cmd/frostfs-node/apemanager.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package main
-
-import (
- "net"
-
- ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage"
- morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- apemanager_transport "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/apemanager/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager"
- apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
- "google.golang.org/grpc"
-)
-
-func initAPEManagerService(c *cfg) {
- contractStorage := ape_contract.NewProxyVerificationContractStorage(
- morph.NewSwitchRPCGuardedActor(c.cfgMorph.client),
- c.key,
- c.cfgMorph.proxyScriptHash,
- c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
-
- execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
- c.cfgMorph.client,
- apemanager.WithLogger(c.log))
- sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
- auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)
- server := apemanager_transport.New(auditSvc)
-
- c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
- apemanager_grpc.RegisterAPEManagerServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(apemanager_grpc.APEManagerService_ServiceDesc), server)
- })
-}
diff --git a/cmd/frostfs-node/attributes.go b/cmd/frostfs-node/attributes.go
index ce8ae9662..64c3beba7 100644
--- a/cmd/frostfs-node/attributes.go
+++ b/cmd/frostfs-node/attributes.go
@@ -6,5 +6,9 @@ import (
)
func parseAttributes(c *cfg) {
+ if nodeconfig.Relay(c.appCfg) {
+ return
+ }
+
fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, nodeconfig.Attributes(c.appCfg)))
}
diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go
index e5df0a22d..6a5d5d182 100644
--- a/cmd/frostfs-node/cache.go
+++ b/cmd/frostfs-node/cache.go
@@ -1,53 +1,53 @@
package main
import (
- "bytes"
- "cmp"
- "context"
- "slices"
"sync"
- "sync/atomic"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/hashicorp/golang-lru/v2/expirable"
- "github.com/hashicorp/golang-lru/v2/simplelru"
- "go.uber.org/zap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ lru "github.com/hashicorp/golang-lru/v2"
)
-type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error)
+type netValueReader[K any, V any] func(K) (V, error)
-type valueWithError[V any] struct {
+type valueWithTime[V any] struct {
v V
+ t time.Time
// cached error in order to not repeat failed request for some time
e error
}
// entity that provides TTL cache interface.
type ttlNetCache[K comparable, V any] struct {
- cache *expirable.LRU[K, *valueWithError[V]]
- netRdr netValueReader[K, V]
+ ttl time.Duration
+
+ sz int
+
+ cache *lru.Cache[K, *valueWithTime[V]]
+
+ netRdr netValueReader[K, V]
+
keyLocker *utilSync.KeyLocker[K]
- metrics cacheMetrics
}
// complicates netValueReader with TTL caching mechanism.
-func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr netValueReader[K, V], metrics cacheMetrics) *ttlNetCache[K, V] {
- cache := expirable.NewLRU[K, *valueWithError[V]](sz, nil, ttl)
+func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr netValueReader[K, V]) *ttlNetCache[K, V] {
+ cache, err := lru.New[K, *valueWithTime[V]](sz)
+ fatalOnErr(err)
return &ttlNetCache[K, V]{
+ ttl: ttl,
+ sz: sz,
cache: cache,
netRdr: netRdr,
- metrics: metrics,
keyLocker: utilSync.NewKeyLocker[K](),
}
}
@@ -57,16 +57,9 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
// updates the value from the network on cache miss or by TTL.
//
// returned value should not be modified.
-func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
- hit := false
- startedAt := time.Now()
- defer func() {
- c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
- }()
-
+func (c *ttlNetCache[K, V]) get(key K) (V, error) {
val, ok := c.cache.Peek(key)
- if ok {
- hit = true
+ if ok && time.Since(val.t) < c.ttl {
return val.v, val.e
}
@@ -74,15 +67,15 @@ func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
defer c.keyLocker.Unlock(key)
val, ok = c.cache.Peek(key)
- if ok {
- hit = true
+ if ok && time.Since(val.t) < c.ttl {
return val.v, val.e
}
- v, err := c.netRdr(ctx, key)
+ v, err := c.netRdr(key)
- c.cache.Add(key, &valueWithError[V]{
+ c.cache.Add(key, &valueWithTime[V]{
v: v,
+ t: time.Now(),
e: err,
})
@@ -90,31 +83,60 @@ func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
}
func (c *ttlNetCache[K, V]) set(k K, v V, e error) {
- startedAt := time.Now()
- defer func() {
- c.metrics.AddMethodDuration("Set", time.Since(startedAt), false)
- }()
-
c.keyLocker.Lock(k)
defer c.keyLocker.Unlock(k)
- c.cache.Add(k, &valueWithError[V]{
+ c.cache.Add(k, &valueWithTime[V]{
v: v,
+ t: time.Now(),
e: e,
})
}
func (c *ttlNetCache[K, V]) remove(key K) {
- hit := false
- startedAt := time.Now()
- defer func() {
- c.metrics.AddMethodDuration("Remove", time.Since(startedAt), hit)
- }()
-
c.keyLocker.Lock(key)
defer c.keyLocker.Unlock(key)
- hit = c.cache.Remove(key)
+ c.cache.Remove(key)
+}
+
+// entity that provides LRU cache interface.
+type lruNetCache struct {
+ cache *lru.Cache[uint64, *netmapSDK.NetMap]
+
+ netRdr netValueReader[uint64, *netmapSDK.NetMap]
+}
+
+// newNetworkLRUCache returns wrapper over netValueReader with LRU cache.
+func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap]) *lruNetCache {
+ cache, err := lru.New[uint64, *netmapSDK.NetMap](sz)
+ fatalOnErr(err)
+
+ return &lruNetCache{
+ cache: cache,
+ netRdr: netRdr,
+ }
+}
+
+// reads value by the key.
+//
+// updates the value from the network on cache miss.
+//
+// returned value should not be modified.
+func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
+ val, ok := c.cache.Get(key)
+ if ok {
+ return val, nil
+ }
+
+ val, err := c.netRdr(key)
+ if err != nil {
+ return nil, err
+ }
+
+ c.cache.Add(key, val)
+
+ return val, nil
}
// wrapper over TTL cache of values read from the network
@@ -124,13 +146,15 @@ type ttlContainerStorage struct {
delInfoCache *ttlNetCache[cid.ID, *container.DelInfo]
}
-func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
- lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) {
- return v.Get(ctx, id)
- }, metrics.NewCacheMetrics("container"))
- lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
- return v.DeletionInfo(ctx, id)
- }, metrics.NewCacheMetrics("container_deletion_info"))
+func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
+ const containerCacheSize = 100
+
+ lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
+ return v.Get(id)
+ })
+ lruDelInfoCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.DelInfo, error) {
+ return v.DeletionInfo(id)
+ })
return ttlContainerStorage{
containerCache: lruCnrCache,
@@ -147,245 +171,68 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
// Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache.
-func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) {
- return s.containerCache.get(ctx, cnr)
+func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
+ return s.containerCache.get(cnr)
}
-func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) {
- return s.delInfoCache.get(ctx, cnr)
+func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
+ return s.delInfoCache.get(cnr)
+}
+
+type ttlEACLStorage struct {
+ *ttlNetCache[cid.ID, *container.EACL]
+}
+
+func newCachedEACLStorage(v container.EACLSource, ttl time.Duration) ttlEACLStorage {
+ const eaclCacheSize = 100
+
+ lruCnrCache := newNetworkTTLCache(eaclCacheSize, ttl, func(id cid.ID) (*container.EACL, error) {
+ return v.GetEACL(id)
+ })
+
+ return ttlEACLStorage{lruCnrCache}
+}
+
+// GetEACL returns eACL value from the cache. If value is missing in the cache
+// or expired, then it returns value from side chain and updates cache.
+func (s ttlEACLStorage) GetEACL(cnr cid.ID) (*container.EACL, error) {
+ return s.get(cnr)
+}
+
+// InvalidateEACL removes cached eACL value.
+func (s ttlEACLStorage) InvalidateEACL(cnr cid.ID) {
+ s.remove(cnr)
}
type lruNetmapSource struct {
netState netmap.State
- client rawSource
- cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]]
- mtx sync.RWMutex
- metrics cacheMetrics
- log *logger.Logger
- candidates atomic.Pointer[[]netmapSDK.NodeInfo]
+ cache *lruNetCache
}
-type rawSource interface {
- GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error)
- GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error)
-}
-
-func newCachedNetmapStorage(ctx context.Context, log *logger.Logger,
- netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration,
-) netmap.Source {
+func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
const netmapCacheSize = 10
- cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil)
- fatalOnErr(err)
+ lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
+ return v.GetNetMapByEpoch(key)
+ })
- src := &lruNetmapSource{
- netState: netState,
- client: client,
- cache: cache,
- log: log,
- metrics: metrics.NewCacheMetrics("netmap"),
- }
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- src.updateCandidates(ctx, d)
- }()
-
- return src
-}
-
-// updateCandidates routine to merge netmap in cache with candidates list.
-func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) {
- timer := time.NewTimer(d)
- defer timer.Stop()
-
- for {
- select {
- case <-ctx.Done():
- return
- case <-timer.C:
- newCandidates, err := s.client.GetCandidates(ctx)
- if err != nil {
- s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err))
- timer.Reset(d)
- break
- }
- if len(newCandidates) == 0 {
- s.candidates.Store(&newCandidates)
- timer.Reset(d)
- break
- }
- slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
- return cmp.Compare(n1.Hash(), n2.Hash())
- })
-
- // Check once state changed
- v := s.candidates.Load()
- if v == nil {
- s.candidates.Store(&newCandidates)
- s.mergeCacheWithCandidates(newCandidates)
- timer.Reset(d)
- break
- }
- ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
- if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) ||
- uint32(n1.Status()) != uint32(n2.Status()) ||
- slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 {
- return 1
- }
- ne1 := slices.Collect(n1.NetworkEndpoints())
- ne2 := slices.Collect(n2.NetworkEndpoints())
- return slices.Compare(ne1, ne2)
- })
- if ret != 0 {
- s.candidates.Store(&newCandidates)
- s.mergeCacheWithCandidates(newCandidates)
- }
- timer.Reset(d)
- }
+ return &lruNetmapSource{
+ netState: s,
+ cache: lruNetmapCache,
}
}
-func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) {
- s.mtx.Lock()
- tmp := s.cache.Values()
- s.mtx.Unlock()
- for _, pointer := range tmp {
- nm := pointer.Load()
- updates := getNetMapNodesToUpdate(nm, candidates)
- if len(updates) > 0 {
- nm = nm.Clone()
- mergeNetmapWithCandidates(updates, nm)
- pointer.Store(nm)
- }
- }
+func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
+ return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
}
-// reads value by the key.
-//
-// updates the value from the network on cache miss.
-//
-// returned value should not be modified.
-func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
- hit := false
- startedAt := time.Now()
- defer func() {
- s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
- }()
-
- s.mtx.RLock()
- val, ok := s.cache.Get(key)
- s.mtx.RUnlock()
- if ok {
- hit = true
- return val.Load(), nil
- }
-
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
- val, ok = s.cache.Get(key)
- if ok {
- hit = true
- return val.Load(), nil
- }
-
- nm, err := s.client.GetNetMapByEpoch(ctx, key)
- if err != nil {
- return nil, err
- }
- v := s.candidates.Load()
- if v != nil {
- updates := getNetMapNodesToUpdate(nm, *v)
- if len(updates) > 0 {
- mergeNetmapWithCandidates(updates, nm)
- }
- }
-
- p := atomic.Pointer[netmapSDK.NetMap]{}
- p.Store(nm)
- s.cache.Add(key, &p)
-
- return nm, nil
+func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
+ return s.getNetMapByEpoch(epoch)
}
-// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates.
-func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) {
- for _, v := range updates {
- if v.status != netmapSDK.UnspecifiedState {
- nm.Nodes()[v.netmapIndex].SetStatus(v.status)
- }
- if v.externalAddresses != nil {
- nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...)
- }
- if v.endpoints != nil {
- nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...)
- }
- }
-}
-
-type nodeToUpdate struct {
- netmapIndex int
- status netmapSDK.NodeState
- externalAddresses []string
- endpoints []string
-}
-
-// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates.
-func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate {
- var res []nodeToUpdate
- for i := range nm.Nodes() {
- for _, cnd := range candidates {
- if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) {
- var tmp nodeToUpdate
- var update bool
-
- if cnd.Status() != nm.Nodes()[i].Status() &&
- (cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) {
- update = true
- tmp.status = cnd.Status()
- }
-
- externalAddresses := cnd.ExternalAddresses()
- if externalAddresses != nil &&
- slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 {
- update = true
- tmp.externalAddresses = externalAddresses
- }
-
- nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints())
- nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints())
- candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints())
- candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints())
- if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 {
- update = true
- tmp.endpoints = candidateEndpoints
- }
-
- if update {
- tmp.netmapIndex = i
- res = append(res, tmp)
- }
-
- break
- }
- }
- }
- return res
-}
-
-func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
- return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff)
-}
-
-func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
- return s.getNetMapByEpoch(ctx, epoch)
-}
-
-func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
- val, err := s.get(ctx, epoch)
+func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
+ val, err := s.cache.get(epoch)
if err != nil {
return nil, err
}
@@ -393,18 +240,126 @@ func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*
return val, nil
}
-func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) {
+func (s *lruNetmapSource) Epoch() (uint64, error) {
return s.netState.CurrentEpoch(), nil
}
+// wrapper over TTL cache of values read from the network
+// that implements container lister.
+type ttlContainerLister struct {
+ inner *ttlNetCache[string, *cacheItemContainerList]
+ client *cntClient.Client
+}
+
+// value type for ttlNetCache used by ttlContainerLister.
+type cacheItemContainerList struct {
+ // protects list from concurrent add/remove ops
+ mtx sync.RWMutex
+ // actual list of containers owned by the particular user
+ list []cid.ID
+}
+
+func newCachedContainerLister(c *cntClient.Client, ttl time.Duration) ttlContainerLister {
+ const containerListerCacheSize = 100
+
+ lruCnrListerCache := newNetworkTTLCache(containerListerCacheSize, ttl, func(strID string) (*cacheItemContainerList, error) {
+ var id *user.ID
+
+ if strID != "" {
+ id = new(user.ID)
+
+ err := id.DecodeString(strID)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ list, err := c.ContainersOf(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return &cacheItemContainerList{
+ list: list,
+ }, nil
+ })
+
+ return ttlContainerLister{inner: lruCnrListerCache, client: c}
+}
+
+// List returns list of container IDs from the cache. If list is missing in the
+// cache or expired, then it returns container IDs from side chain and updates
+// the cache.
+func (s ttlContainerLister) List(id *user.ID) ([]cid.ID, error) {
+ if id == nil {
+ return s.client.ContainersOf(nil)
+ }
+
+ item, err := s.inner.get(id.EncodeToString())
+ if err != nil {
+ return nil, err
+ }
+
+ item.mtx.RLock()
+ res := make([]cid.ID, len(item.list))
+ copy(res, item.list)
+ item.mtx.RUnlock()
+
+ return res, nil
+}
+
+// updates cached list of owner's containers: cnr is added if flag is true, otherwise it's removed.
+// Concurrent calls can lead to some races:
+// - two parallel additions to missing owner's cache can lead to only one container to be cached
+// - async cache value eviction can lead to idle addition
+//
+// All described race cases aren't critical since cache values expire anyway, we just try
+// to increase cache actuality w/o huge overhead on synchronization.
+func (s *ttlContainerLister) update(owner user.ID, cnr cid.ID, add bool) {
+ strOwner := owner.EncodeToString()
+
+ val, ok := s.inner.cache.Peek(strOwner)
+ if !ok {
+ // we could cache the single cnr but in this case we would diverge
+ // from the Sidechain state a lot
+ return
+ }
+
+ if s.inner.ttl <= time.Since(val.t) {
+ return
+ }
+
+ item := val.v
+
+ item.mtx.Lock()
+ {
+ found := false
+
+ for i := range item.list {
+ if found = item.list[i].Equals(cnr); found {
+ if !add {
+ item.list = append(item.list[:i], item.list[i+1:]...)
+ // if list became empty we don't remove the value from the cache
+ // since empty list is a correct value, and we don't want to instantly
+ // re-request it from the Sidechain
+ }
+
+ break
+ }
+ }
+
+ if add && !found {
+ item.list = append(item.list, cnr)
+ }
+ }
+ item.mtx.Unlock()
+}
+
type cachedIRFetcher struct {
*ttlNetCache[struct{}, [][]byte]
}
-func newCachedIRFetcher(f interface {
- InnerRingKeys(ctx context.Context) ([][]byte, error)
-},
-) cachedIRFetcher {
+func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
const (
irFetcherCacheSize = 1 // we intend to store only one value
@@ -418,9 +373,9 @@ func newCachedIRFetcher(f interface {
)
irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
- func(ctx context.Context, _ struct{}) ([][]byte, error) {
- return f.InnerRingKeys(ctx)
- }, metrics.NewCacheMetrics("ir_keys"),
+ func(_ struct{}) ([][]byte, error) {
+ return f.InnerRingKeys()
+ },
)
return cachedIRFetcher{irFetcherCache}
@@ -429,8 +384,8 @@ func newCachedIRFetcher(f interface {
// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
// the cache or expired, then it returns keys from side chain and updates
// the cache.
-func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) {
- val, err := f.get(ctx, struct{}{})
+func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
+ val, err := f.get(struct{}{})
if err != nil {
return nil, err
}
@@ -442,40 +397,31 @@ type ttlMaxObjectSizeCache struct {
mtx sync.RWMutex
lastUpdated time.Time
lastSize uint64
- src objectwriter.MaxSizeSource
- metrics cacheMetrics
+ src putsvc.MaxSizeSource
}
-func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.MaxSizeSource {
+func newCachedMaxObjectSizeSource(src putsvc.MaxSizeSource) putsvc.MaxSizeSource {
return &ttlMaxObjectSizeCache{
- src: src,
- metrics: metrics.NewCacheMetrics("max_object_size"),
+ src: src,
}
}
-func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
+func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
const ttl = time.Second * 30
- hit := false
- startedAt := time.Now()
- defer func() {
- c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
- }()
-
c.mtx.RLock()
prevUpdated := c.lastUpdated
size := c.lastSize
c.mtx.RUnlock()
if time.Since(prevUpdated) < ttl {
- hit = true
return size
}
c.mtx.Lock()
size = c.lastSize
if !c.lastUpdated.After(prevUpdated) {
- size = c.src.MaxObjectSize(ctx)
+ size = c.src.MaxObjectSize()
c.lastSize = size
c.lastUpdated = time.Now()
}
@@ -483,7 +429,3 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
return size
}
-
-type cacheMetrics interface {
- AddMethodDuration(method string, d time.Duration, hit bool)
-}
diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go
deleted file mode 100644
index 24286826f..000000000
--- a/cmd/frostfs-node/cache_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package main
-
-import (
- "context"
- "errors"
- "sync"
- "testing"
- "time"
-
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/stretchr/testify/require"
-)
-
-func TestTTLNetCache(t *testing.T) {
- ttlDuration := time.Millisecond * 50
- cache := newNetworkTTLCache(10, ttlDuration, testNetValueReader, &noopCacheMetricts{})
-
- key := "key"
-
- t.Run("Test Add and Get", func(t *testing.T) {
- ti := time.Now()
- cache.set(key, ti, nil)
- val, err := cache.get(context.Background(), key)
- require.NoError(t, err)
- require.Equal(t, ti, val)
- })
-
- t.Run("Test TTL", func(t *testing.T) {
- ti := time.Now()
- cache.set(key, ti, nil)
- time.Sleep(2 * ttlDuration)
- val, err := cache.get(context.Background(), key)
- require.NoError(t, err)
- require.NotEqual(t, val, ti)
- })
-
- t.Run("Test Remove", func(t *testing.T) {
- ti := time.Now()
- cache.set(key, ti, nil)
- cache.remove(key)
- val, err := cache.get(context.Background(), key)
- require.NoError(t, err)
- require.NotEqual(t, val, ti)
- })
-
- t.Run("Test Cache Error", func(t *testing.T) {
- cache.set("error", time.Now(), errors.New("mock error"))
- _, err := cache.get(context.Background(), "error")
- require.Error(t, err)
- require.Equal(t, "mock error", err.Error())
- })
-}
-
-func testNetValueReader(_ context.Context, key string) (time.Time, error) {
- if key == "error" {
- return time.Now(), errors.New("mock error")
- }
- return time.Now(), nil
-}
-
-type noopCacheMetricts struct{}
-
-func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {}
-
-type rawSrc struct{}
-
-func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) {
- node0 := netmapSDK.NodeInfo{}
- node0.SetPublicKey([]byte{byte(1)})
- node0.SetStatus(netmapSDK.Online)
- node0.SetExternalAddresses("1", "0")
- node0.SetNetworkEndpoints("1", "0")
-
- node1 := netmapSDK.NodeInfo{}
- node1.SetPublicKey([]byte{byte(1)})
- node1.SetStatus(netmapSDK.Online)
- node1.SetExternalAddresses("1", "0")
- node1.SetNetworkEndpoints("1", "0")
-
- return []netmapSDK.NodeInfo{node0, node1}, nil
-}
-
-func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
- nm := netmapSDK.NetMap{}
- nm.SetEpoch(1)
-
- node0 := netmapSDK.NodeInfo{}
- node0.SetPublicKey([]byte{byte(1)})
- node0.SetStatus(netmapSDK.Maintenance)
- node0.SetExternalAddresses("0")
- node0.SetNetworkEndpoints("0")
-
- node1 := netmapSDK.NodeInfo{}
- node1.SetPublicKey([]byte{byte(1)})
- node1.SetStatus(netmapSDK.Maintenance)
- node1.SetExternalAddresses("0")
- node1.SetNetworkEndpoints("0")
-
- nm.SetNodes([]netmapSDK.NodeInfo{node0, node1})
-
- return &nm, nil
-}
-
-type st struct{}
-
-func (s *st) CurrentEpoch() uint64 {
- return 1
-}
-
-func TestNetmapStorage(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- wg := sync.WaitGroup{}
- cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50)
-
- nm, err := cache.GetNetMapByEpoch(ctx, 1)
- require.NoError(t, err)
- require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance)
- require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1)
- require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1)
-
- require.Eventually(t, func() bool {
- nm, err := cache.GetNetMapByEpoch(ctx, 1)
- require.NoError(t, err)
- for _, node := range nm.Nodes() {
- if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 &&
- node.NumberOfNetworkEndpoints() == 2) {
- return false
- }
- }
- return true
- }, time.Second*5, time.Millisecond*10)
-
- cancel()
- wg.Wait()
-}
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 96274e625..60e567c5a 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -15,33 +15,24 @@ import (
"syscall"
"time"
+ netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/audit"
contractsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/contracts"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger"
- morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/multinet"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
- treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
- internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -49,7 +40,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
shardmode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -60,7 +54,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone"
tsourse "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone/source"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
@@ -68,24 +61,17 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
- netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- policy_client "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
neogoutil "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.etcd.io/bbolt"
"go.uber.org/zap"
- "go.uber.org/zap/zapcore"
"google.golang.org/grpc"
)
@@ -107,36 +93,23 @@ type applicationConfiguration struct {
_read bool
LoggerCfg struct {
- level string
- destination string
- timestamp bool
- options []zap.Option
- tags [][]string
- }
-
- ObjectCfg struct {
- tombstoneLifetime uint64
- priorityMetrics []placement.Metric
+ level string
}
EngineCfg struct {
errorThreshold uint32
+ shardPoolSize uint32
shards []shardCfg
lowMem bool
}
-
- // if need to run node in compatibility with other versions mode
- cmode *atomic.Bool
}
type shardCfg struct {
- compression compression.Config
-
- smallSizeObjectLimit uint64
- refillMetabase bool
- refillMetabaseWorkersCount int
- mode shardmode.Mode
- limiter qos.Limiter
+ compress bool
+ smallSizeObjectLimit uint64
+ uncompressableContentType []string
+ refillMetabase bool
+ mode shardmode.Mode
metaCfg struct {
path string
@@ -148,21 +121,24 @@ type shardCfg struct {
subStorages []subStorageCfg
gcCfg struct {
- removerBatchSize int
- removerSleepInterval time.Duration
- expiredCollectorBatchSize int
- expiredCollectorWorkerCount int
+ removerBatchSize int
+ removerSleepInterval time.Duration
+ expiredCollectorBatchSize int
+ expiredCollectorWorkersCount int
}
writecacheCfg struct {
enabled bool
+ typ writecacheconfig.Type
path string
+ maxBatchSize int
+ maxBatchDelay time.Duration
+ smallObjectSize uint64
maxObjSize uint64
flushWorkerCount int
sizeLimit uint64
- countLimit uint64
noSync bool
- flushSizeLimit uint64
+ gcInterval time.Duration
}
piloramaCfg struct {
@@ -196,13 +172,10 @@ type subStorageCfg struct {
noSync bool
// blobovnicza-specific
- size uint64
- width uint64
- openedCacheSize int
- initWorkerCount int
- rebuildDropTimeout time.Duration
- openedCacheTTL time.Duration
- openedCacheExpInterval time.Duration
+ size uint64
+ width uint64
+ leafWidth uint64
+ openedCacheSize int
}
// readConfig fills applicationConfiguration with raw configuration values
@@ -220,100 +193,73 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
}
// clear if it is rereading
- cmode := a.cmode
*a = applicationConfiguration{}
- a.cmode = cmode
}
a._read = true
- a.cmode.Store(nodeconfig.CompatibilityMode(c))
// Logger
a.LoggerCfg.level = loggerconfig.Level(c)
- a.LoggerCfg.destination = loggerconfig.Destination(c)
- a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
- var opts []zap.Option
- if loggerconfig.ToLokiConfig(c).Enabled {
- opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
- lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
- return lokiCore
- })}
- }
- a.LoggerCfg.options = opts
- a.LoggerCfg.tags = loggerconfig.Tags(c)
-
- // Object
-
- a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c)
- locodeDBPath := nodeconfig.LocodeDBPath(c)
- parser, err := placement.NewMetricsParser(locodeDBPath)
- if err != nil {
- return fmt.Errorf("metrics parser creation: %w", err)
- }
- m, err := parser.ParseMetrics(objectconfig.Get(c).Priority())
- if err != nil {
- return fmt.Errorf("parse metrics: %w", err)
- }
- a.ObjectCfg.priorityMetrics = m
// Storage Engine
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
+ a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
}
-func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error {
- var target shardCfg
+func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
+ var newConfig shardCfg
- target.refillMetabase = source.RefillMetabase()
- target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount()
- target.mode = source.Mode()
- target.compression = source.Compression()
- target.smallSizeObjectLimit = source.SmallSizeLimit()
+ newConfig.refillMetabase = oldConfig.RefillMetabase()
+ newConfig.mode = oldConfig.Mode()
+ newConfig.compress = oldConfig.Compress()
+ newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
+ newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
- a.setShardWriteCacheConfig(&target, source)
+ a.setShardWriteCacheConfig(&newConfig, oldConfig)
- a.setShardPiloramaConfig(c, &target, source)
+ a.setShardPiloramaConfig(c, &newConfig, oldConfig)
- if err := a.setShardStorageConfig(&target, source); err != nil {
+ if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
return err
}
- a.setMetabaseConfig(&target, source)
+ a.setMetabaseConfig(&newConfig, oldConfig)
- a.setGCConfig(&target, source)
- if err := a.setLimiter(&target, source); err != nil {
- return err
- }
+ a.setGCConfig(&newConfig, oldConfig)
- a.EngineCfg.shards = append(a.EngineCfg.shards, target)
+ a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
return nil
}
-func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) {
- writeCacheCfg := source.WriteCache()
+func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+ writeCacheCfg := oldConfig.WriteCache()
if writeCacheCfg.Enabled() {
- wc := &target.writecacheCfg
+ wc := &newConfig.writecacheCfg
wc.enabled = true
+ wc.typ = writeCacheCfg.Type()
wc.path = writeCacheCfg.Path()
+ wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize()
+ wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
wc.maxObjSize = writeCacheCfg.MaxObjectSize()
- wc.flushWorkerCount = writeCacheCfg.WorkerCount()
+ wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
+ wc.flushWorkerCount = writeCacheCfg.WorkersNumber()
wc.sizeLimit = writeCacheCfg.SizeLimit()
- wc.countLimit = writeCacheCfg.CountLimit()
wc.noSync = writeCacheCfg.NoSync()
- wc.flushSizeLimit = writeCacheCfg.MaxFlushingObjectsSize()
+ wc.gcInterval = writeCacheCfg.GCInterval()
}
}
-func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) {
+func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
if config.BoolSafe(c.Sub("tree"), "enabled") {
- piloramaCfg := source.Pilorama()
- pr := &target.piloramaCfg
+ piloramaCfg := oldConfig.Pilorama()
+ pr := &newConfig.piloramaCfg
pr.enabled = true
pr.path = piloramaCfg.Path()
@@ -324,8 +270,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, targ
}
}
-func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error {
- blobStorCfg := source.BlobStor()
+func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
+ blobStorCfg := oldConfig.BlobStor()
storagesCfg := blobStorCfg.Storages()
ss := make([]subStorageCfg, 0, len(storagesCfg))
@@ -343,11 +289,8 @@ func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, sourc
sCfg.size = sub.Size()
sCfg.depth = sub.ShallowDepth()
sCfg.width = sub.ShallowWidth()
+ sCfg.leafWidth = sub.LeafWidth()
sCfg.openedCacheSize = sub.OpenedCacheSize()
- sCfg.openedCacheTTL = sub.OpenedCacheTTL()
- sCfg.openedCacheExpInterval = sub.OpenedCacheExpInterval()
- sCfg.initWorkerCount = sub.InitWorkerCount()
- sCfg.rebuildDropTimeout = sub.RebuildDropTimeout()
case fstree.Type:
sub := fstreeconfig.From((*config.Config)(storagesCfg[i]))
sCfg.depth = sub.Depth()
@@ -359,13 +302,13 @@ func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, sourc
ss = append(ss, sCfg)
}
- target.subStorages = ss
+ newConfig.subStorages = ss
return nil
}
-func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) {
- metabaseCfg := source.Metabase()
- m := &target.metaCfg
+func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+ metabaseCfg := oldConfig.Metabase()
+ m := &newConfig.metaCfg
m.path = metabaseCfg.Path()
m.perm = metabaseCfg.BoltDB().Perm()
@@ -373,22 +316,12 @@ func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *s
m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
}
-func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) {
- gcCfg := source.GC()
- target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
- target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
- target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
- target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
-}
-
-func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
- limitsConfig := source.Limits().ToConfig()
- limiter, err := qos.NewLimiter(limitsConfig)
- if err != nil {
- return err
- }
- target.limiter = limiter
- return nil
+func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+ gcCfg := oldConfig.GC()
+ newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
+ newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
+ newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
+ newConfig.gcCfg.expiredCollectorWorkersCount = gcCfg.ExpiredCollectorWorkersCount()
}
// internals contains application-specific internals that are created
@@ -413,23 +346,19 @@ type internals struct {
healthStatus *atomic.Int32
// is node under maintenance
isMaintenance atomic.Bool
- audit *atomic.Bool
-
- sdNotify bool
}
// starts node's maintenance.
-func (c *cfg) startMaintenance(ctx context.Context) {
+func (c *cfg) startMaintenance() {
c.isMaintenance.Store(true)
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
- c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance)
+ c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
}
// stops node's maintenance.
-func (c *internals) stopMaintenance(ctx context.Context) {
- if c.isMaintenance.CompareAndSwap(true, false) {
- c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance)
- }
+func (c *internals) stopMaintenance() {
+ c.isMaintenance.Store(false)
+ c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
}
// IsMaintenance checks if storage node is under maintenance.
@@ -460,8 +389,6 @@ type shared struct {
cnrClient *containerClient.Client
- frostfsidClient frostfsidcore.SubjectProvider
-
respSvc *response.Service
replicator *replicator.Replicator
@@ -471,37 +398,21 @@ type shared struct {
metricsCollector *metrics.NodeMetrics
metricsSvc *objectService.MetricCollector
-
- dialerSource *internalNet.DialerSource
}
// dynamicConfiguration stores parameters of the
// components that supports runtime reconfigurations.
type dynamicConfiguration struct {
+ logger *logger.Prm
pprof *httpComponent
metrics *httpComponent
}
-type appConfigGuard struct {
- mtx sync.RWMutex
-}
-
-func (g *appConfigGuard) LockAppConfigShared() func() {
- g.mtx.RLock()
- return func() { g.mtx.RUnlock() }
-}
-
-func (g *appConfigGuard) LockAppConfigExclusive() func() {
- g.mtx.Lock()
- return func() { g.mtx.Unlock() }
-}
-
type cfg struct {
applicationConfiguration
internals
shared
dynamicConfiguration
- appConfigGuard
// configuration of the internal
// services
@@ -509,12 +420,11 @@ type cfg struct {
cfgMorph cfgMorph
cfgAccounting cfgAccounting
cfgContainer cfgContainer
- cfgFrostfsID cfgFrostfsID
cfgNodeInfo cfgNodeInfo
cfgNetmap cfgNetmap
cfgControlService cfgControlService
cfgObject cfgObject
- cfgQoSService cfgQoSService
+ cfgNotifications cfgNotifications
}
// ReadCurrentNetMap reads network map which has been cached at the
@@ -532,94 +442,26 @@ func (c *cfg) ReadCurrentNetMap(msg *netmapV2.NetMap) error {
return nil
}
-type grpcServer struct {
- Listener net.Listener
- Server *grpc.Server
- Endpoint string
-}
-
type cfgGRPC struct {
- // guard protects connections and handlers
- guard sync.RWMutex
- // servers must be protected with guard
- servers []grpcServer
- // handlers must be protected with guard
- handlers []func(e string, l net.Listener, s *grpc.Server)
+ listeners []net.Listener
- maxChunkSize uint64
- maxAddrAmount uint64
- reconnectTimeout time.Duration
+ servers []*grpc.Server
- limiter atomic.Pointer[limiting.SemaphoreLimiter]
-}
+ endpoints []string
-func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
- c.guard.Lock()
- defer c.guard.Unlock()
+ maxChunkSize uint64
- c.servers = append(c.servers, grpcServer{
- Listener: l,
- Server: s,
- Endpoint: e,
- })
-}
-
-func (c *cfgGRPC) appendAndHandle(e string, l net.Listener, s *grpc.Server) {
- c.guard.Lock()
- defer c.guard.Unlock()
-
- c.servers = append(c.servers, grpcServer{
- Listener: l,
- Server: s,
- Endpoint: e,
- })
-
- for _, h := range c.handlers {
- h(e, l, s)
- }
-}
-
-func (c *cfgGRPC) performAndSave(handler func(e string, l net.Listener, s *grpc.Server)) {
- c.guard.Lock()
- defer c.guard.Unlock()
-
- for _, conn := range c.servers {
- handler(conn.Endpoint, conn.Listener, conn.Server)
- }
-
- c.handlers = append(c.handlers, handler)
-}
-
-func (c *cfgGRPC) dropConnection(endpoint string) {
- c.guard.Lock()
- defer c.guard.Unlock()
-
- pos := -1
- for idx, srv := range c.servers {
- if srv.Endpoint == endpoint {
- pos = idx
- break
- }
- }
- if pos < 0 {
- return
- }
-
- c.servers[pos].Server.Stop() // closes listener
- c.servers = append(c.servers[0:pos], c.servers[pos+1:]...)
+ maxAddrAmount uint64
}
type cfgMorph struct {
- initialized bool
- guard sync.Mutex
-
client *client.Client
+ notaryEnabled bool
+
// TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration
- containerCacheSize uint32
-
proxyScriptHash neogoutil.Uint160
}
@@ -630,14 +472,9 @@ type cfgAccounting struct {
type cfgContainer struct {
scriptHash neogoutil.Uint160
- parsers map[event.Type]event.NotificationParser
- subscribers map[event.Type][]event.Handler
- workerPool util.WorkerPool // pool for asynchronous handlers
- containerBatchSize uint32
-}
-
-type cfgFrostfsID struct {
- scriptHash neogoutil.Uint160
+ parsers map[event.Type]event.NotificationParser
+ subscribers map[event.Type][]event.Handler
+ workerPool util.WorkerPool // pool for asynchronous handlers
}
type cfgNetmap struct {
@@ -651,7 +488,9 @@ type cfgNetmap struct {
state *networkState
+ needBootstrap bool
reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime
+ startEpoch uint64 // epoch number when application is started
}
type cfgNodeInfo struct {
@@ -664,28 +503,38 @@ type cfgObject struct {
cnrSource container.Source
- cfgAccessPolicyEngine cfgAccessPolicyEngine
+ eaclSource container.EACLSource
pool cfgObjectRoutines
cfgLocalStorage cfgLocalStorage
- tombstoneLifetime *atomic.Uint64
+ tombstoneLifetime uint64
skipSessionTokenIssuerVerification bool
}
+type cfgNotifications struct {
+ enabled bool
+ nw notificationWriter
+ defaultTopic string
+}
+
type cfgLocalStorage struct {
localStorage *engine.StorageEngine
}
-type cfgAccessPolicyEngine struct {
- policyContractHash neogoutil.Uint160
-
- accessPolicyEngine *accessPolicyEngine
-}
-
type cfgObjectRoutines struct {
+ putRemote *ants.Pool
+
+ putRemoteCapacity int
+
+ putLocal *ants.Pool
+
+ putLocalCapacity int
+
+ replicatorPoolSize int
+
replication *ants.Pool
}
@@ -696,11 +545,7 @@ type cfgControlService struct {
var persistateSideChainLastBlockKey = []byte("side_chain_last_processed_block")
func initCfg(appCfg *config.Config) *cfg {
- c := &cfg{
- applicationConfiguration: applicationConfiguration{
- cmode: &atomic.Bool{},
- },
- }
+ c := &cfg{}
err := c.readConfig(appCfg)
if err != nil {
@@ -709,29 +554,28 @@ func initCfg(appCfg *config.Config) *cfg {
key := nodeconfig.Key(appCfg)
- netState := newNetworkState()
-
- c.shared = initShared(appCfg, key, netState)
-
- netState.metrics = c.metricsCollector
-
logPrm, err := c.loggerPrm()
fatalOnErr(err)
- logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
+
+ logPrm.MetricsNamespace = "frostfs_node"
+
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
- logger.UpdateLevelForTags(logPrm)
c.internals = initInternals(appCfg, log)
+ relayOnly := nodeconfig.Relay(appCfg)
+
+ netState := newNetworkState()
+
+ c.shared = initShared(appCfg, key, netState, relayOnly)
+
c.cfgAccounting = cfgAccounting{
scriptHash: contractsconfig.Balance(appCfg),
}
c.cfgContainer = initContainer(appCfg)
- c.cfgFrostfsID = initFrostfsID(appCfg)
-
- c.cfgNetmap = initNetmap(appCfg, netState)
+ c.cfgNetmap = initNetmap(appCfg, netState, relayOnly)
c.cfgGRPC = initCfgGRPC()
@@ -742,6 +586,9 @@ func initCfg(appCfg *config.Config) *cfg {
user.IDFromKey(&c.ownerIDFromKey, key.PrivateKey.PublicKey)
+ c.metricsCollector = metrics.NewNodeMetrics()
+ netState.metrics = c.metricsCollector
+
c.onShutdown(c.clientCache.CloseAll) // clean up connections
c.onShutdown(c.bgClientCache.CloseAll) // clean up connections
c.onShutdown(c.putClientCache.CloseAll) // clean up connections
@@ -754,9 +601,6 @@ func initInternals(appCfg *config.Config, log *logger.Logger) internals {
var healthStatus atomic.Int32
healthStatus.Store(int32(control.HealthStatus_HEALTH_STATUS_UNDEFINED))
- var auditRequests atomic.Bool
- auditRequests.Store(audit.Enabled(appCfg))
-
return internals{
done: make(chan struct{}),
appCfg: appCfg,
@@ -764,80 +608,51 @@ func initInternals(appCfg *config.Config, log *logger.Logger) internals {
log: log,
apiVersion: version.Current(),
healthStatus: &healthStatus,
- sdNotify: initSdNotify(appCfg),
- audit: &auditRequests,
}
}
-func initSdNotify(appCfg *config.Config) bool {
- if config.BoolSafe(appCfg.Sub("systemdnotify"), "enabled") {
- fatalOnErr(sdnotify.InitSocket())
- return true
- }
- return false
-}
+func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState, relayOnly bool) shared {
+ var netAddr network.AddressGroup
-func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState) shared {
- netAddr := nodeconfig.BootstrapAddresses(appCfg)
+ if !relayOnly {
+ netAddr = nodeconfig.BootstrapAddresses(appCfg)
+ }
persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path())
fatalOnErr(err)
- nodeMetrics := metrics.NewNodeMetrics()
-
- ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg, nodeMetrics.MultinetMetrics()))
- fatalOnErr(err)
-
cacheOpts := cache.ClientCacheOpts{
DialTimeout: apiclientconfig.DialTimeout(appCfg),
StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
Key: &key.PrivateKey,
AllowExternal: apiclientconfig.AllowExternal(appCfg),
ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
- DialerSource: ds,
}
return shared{
- key: key,
- binPublicKey: key.PublicKey().Bytes(),
- localAddr: netAddr,
- respSvc: response.NewService(netState),
- clientCache: cache.NewSDKClientCache(cacheOpts),
- bgClientCache: cache.NewSDKClientCache(cacheOpts),
- putClientCache: cache.NewSDKClientCache(cacheOpts),
- persistate: persistate,
- metricsCollector: nodeMetrics,
- dialerSource: ds,
+ key: key,
+ binPublicKey: key.PublicKey().Bytes(),
+ localAddr: netAddr,
+ respSvc: response.NewService(netState),
+ clientCache: cache.NewSDKClientCache(cacheOpts),
+ bgClientCache: cache.NewSDKClientCache(cacheOpts),
+ putClientCache: cache.NewSDKClientCache(cacheOpts),
+ persistate: persistate,
}
}
-func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) internalNet.Config {
- result := internalNet.Config{
- Enabled: multinet.Enabled(appCfg),
- Balancer: multinet.Balancer(appCfg),
- Restrict: multinet.Restrict(appCfg),
- FallbackDelay: multinet.FallbackDelay(appCfg),
- Metrics: m,
- }
- sn := multinet.Subnets(appCfg)
- for _, s := range sn {
- result.Subnets = append(result.Subnets, internalNet.Subnet{
- Prefix: s.Mask,
- SourceIPs: s.SourceIPs,
- })
- }
- return result
-}
-
-func initNetmap(appCfg *config.Config, netState *networkState) cfgNetmap {
+func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap {
netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
fatalOnErr(err)
+ var reBootstrapTurnedOff atomic.Bool
+ reBootstrapTurnedOff.Store(relayOnly)
return cfgNetmap{
scriptHash: contractsconfig.Netmap(appCfg),
state: netState,
workerPool: netmapWorkerPool,
- reBoostrapTurnedOff: &atomic.Bool{},
+ needBootstrap: !relayOnly,
+ reBoostrapTurnedOff: &reBootstrapTurnedOff,
}
}
@@ -851,38 +666,31 @@ func initContainer(appCfg *config.Config) cfgContainer {
}
}
-func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
- return cfgFrostfsID{
- scriptHash: contractsconfig.FrostfsID(appCfg),
+func initCfgGRPC() cfgGRPC {
+ maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
+ maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
+
+ return cfgGRPC{
+ maxChunkSize: maxChunkSize,
+ maxAddrAmount: maxAddrAmount,
}
}
-func initCfgGRPC() (cfg cfgGRPC) {
- maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
- maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
-
- cfg.maxChunkSize = maxChunkSize
- cfg.maxAddrAmount = maxAddrAmount
-
- return
-}
-
func initCfgObject(appCfg *config.Config) cfgObject {
- var tsLifetime atomic.Uint64
- tsLifetime.Store(objectconfig.TombstoneLifetime(appCfg))
return cfgObject{
pool: initObjectPool(appCfg),
- tombstoneLifetime: &tsLifetime,
+ tombstoneLifetime: objectconfig.TombstoneLifetime(appCfg),
skipSessionTokenIssuerVerification: objectconfig.Put(appCfg).SkipSessionTokenIssuerVerification(),
}
}
func (c *cfg) engineOpts() []engine.Option {
- var opts []engine.Option
+ opts := make([]engine.Option, 0, 4)
opts = append(opts,
+ engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
- engine.WithLogger(c.log.WithTag(logger.TagEngine)),
+ engine.WithLogger(c.log),
engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
)
@@ -898,30 +706,46 @@ type shardOptsWithID struct {
shOpts []shard.Option
}
-func (c *cfg) shardOpts(ctx context.Context) []shardOptsWithID {
+func (c *cfg) shardOpts() []shardOptsWithID {
shards := make([]shardOptsWithID, 0, len(c.EngineCfg.shards))
for _, shCfg := range c.EngineCfg.shards {
- shards = append(shards, c.getShardOpts(ctx, shCfg))
+ shards = append(shards, c.getShardOpts(shCfg))
}
return shards
}
-func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
- var writeCacheOpts []writecache.Option
+func (c *cfg) getWriteCacheOpts(shCfg shardCfg) writecacheconfig.Options {
+ var writeCacheOpts writecacheconfig.Options
if wcRead := shCfg.writecacheCfg; wcRead.enabled {
- writeCacheOpts = append(writeCacheOpts,
- writecache.WithPath(wcRead.path),
- writecache.WithFlushSizeLimit(wcRead.flushSizeLimit),
- writecache.WithMaxObjectSize(wcRead.maxObjSize),
- writecache.WithFlushWorkersCount(wcRead.flushWorkerCount),
- writecache.WithMaxCacheSize(wcRead.sizeLimit),
- writecache.WithMaxCacheCount(wcRead.countLimit),
- writecache.WithNoSync(wcRead.noSync),
- writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)),
- writecache.WithQoSLimiter(shCfg.limiter),
- )
+ switch wcRead.typ {
+ case writecacheconfig.TypeBBolt:
+ writeCacheOpts.Type = writecacheconfig.TypeBBolt
+ writeCacheOpts.BBoltOptions = append(writeCacheOpts.BBoltOptions,
+ writecachebbolt.WithPath(wcRead.path),
+ writecachebbolt.WithMaxBatchSize(wcRead.maxBatchSize),
+ writecachebbolt.WithMaxBatchDelay(wcRead.maxBatchDelay),
+ writecachebbolt.WithMaxObjectSize(wcRead.maxObjSize),
+ writecachebbolt.WithSmallObjectSize(wcRead.smallObjectSize),
+ writecachebbolt.WithFlushWorkersCount(wcRead.flushWorkerCount),
+ writecachebbolt.WithMaxCacheSize(wcRead.sizeLimit),
+ writecachebbolt.WithNoSync(wcRead.noSync),
+ writecachebbolt.WithLogger(c.log),
+ )
+ case writecacheconfig.TypeBadger:
+ writeCacheOpts.Type = writecacheconfig.TypeBadger
+ writeCacheOpts.BadgerOptions = append(writeCacheOpts.BadgerOptions,
+ writecachebadger.WithPath(wcRead.path),
+ writecachebadger.WithMaxObjectSize(wcRead.maxObjSize),
+ writecachebadger.WithFlushWorkersCount(wcRead.flushWorkerCount),
+ writecachebadger.WithMaxCacheSize(wcRead.sizeLimit),
+ writecachebadger.WithLogger(c.log),
+ writecachebadger.WithGCInterval(wcRead.gcInterval),
+ )
+ default:
+ panic(fmt.Sprintf("unknown writecache type: %q", wcRead.typ))
+ }
}
return writeCacheOpts
}
@@ -943,7 +767,7 @@ func (c *cfg) getPiloramaOpts(shCfg shardCfg) []pilorama.Option {
return piloramaOpts
}
-func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.SubStorage {
+func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
var ss []blobstor.SubStorage
for _, sRead := range shCfg.subStorages {
switch sRead.typ {
@@ -954,14 +778,9 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
blobovniczatree.WithBlobovniczaSize(sRead.size),
blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
+ blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth),
blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
- blobovniczatree.WithOpenedCacheTTL(sRead.openedCacheTTL),
- blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval),
- blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount),
- blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout),
- blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)),
- blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)),
- blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit),
+ blobovniczatree.WithLogger(c.log),
}
if c.metricsCollector != nil {
@@ -972,7 +791,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
)
}
ss = append(ss, blobstor.SubStorage{
- Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
+ Storage: blobovniczatree.NewBlobovniczaTree(blobTreeOpts...),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < shCfg.smallSizeObjectLimit
},
@@ -983,7 +802,6 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
fstree.WithPerm(sRead.perm),
fstree.WithDepth(sRead.depth),
fstree.WithNoSync(sRead.noSync),
- fstree.WithLogger(c.log.WithTag(logger.TagFSTree)),
}
if c.metricsCollector != nil {
fstreeOpts = append(fstreeOpts,
@@ -995,7 +813,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
ss = append(ss, blobstor.SubStorage{
Storage: fstree.New(fstreeOpts...),
- Policy: func(_ *objectSDK.Object, _ []byte) bool {
+ Policy: func(_ *objectSDK.Object, data []byte) bool {
return true
},
})
@@ -1007,15 +825,16 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
return ss
}
-func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID {
+func (c *cfg) getShardOpts(shCfg shardCfg) shardOptsWithID {
writeCacheOpts := c.getWriteCacheOpts(shCfg)
piloramaOpts := c.getPiloramaOpts(shCfg)
- ss := c.getSubstorageOpts(ctx, shCfg)
+ ss := c.getSubstorageOpts(shCfg)
blobstoreOpts := []blobstor.Option{
- blobstor.WithCompression(shCfg.compression),
+ blobstor.WithCompressObjects(shCfg.compress),
+ blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType),
blobstor.WithStorages(ss),
- blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)),
+ blobstor.WithLogger(c.log),
}
if c.metricsCollector != nil {
blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore())))
@@ -1034,15 +853,13 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
}
if c.metricsCollector != nil {
mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
- shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
}
var sh shardOptsWithID
sh.configID = shCfg.id()
sh.shOpts = []shard.Option{
- shard.WithLogger(c.log.WithTag(logger.TagShard)),
+ shard.WithLogger(c.log),
shard.WithRefillMetabase(shCfg.refillMetabase),
- shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount),
shard.WithMode(shCfg.mode),
shard.WithBlobStorOptions(blobstoreOpts...),
shard.WithMetaBaseOptions(mbOptions...),
@@ -1052,40 +869,31 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
shard.WithRemoverBatchSize(shCfg.gcCfg.removerBatchSize),
shard.WithGCRemoverSleepInterval(shCfg.gcCfg.removerSleepInterval),
shard.WithExpiredCollectorBatchSize(shCfg.gcCfg.expiredCollectorBatchSize),
- shard.WithExpiredCollectorWorkerCount(shCfg.gcCfg.expiredCollectorWorkerCount),
+ shard.WithExpiredCollectorWorkersCount(shCfg.gcCfg.expiredCollectorWorkersCount),
shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
pool, err := ants.NewPool(sz)
fatalOnErr(err)
return pool
}),
- shard.WithLimiter(shCfg.limiter),
}
return sh
}
-func (c *cfg) loggerPrm() (logger.Prm, error) {
- var prm logger.Prm
- // (re)init read configuration
- err := prm.SetLevelString(c.LoggerCfg.level)
- if err != nil {
- // not expected since validation should be performed before
- return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
- }
- err = prm.SetDestination(c.LoggerCfg.destination)
- if err != nil {
- // not expected since validation should be performed before
- return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
- }
- prm.PrependTimestamp = c.LoggerCfg.timestamp
- prm.Options = c.LoggerCfg.options
- err = prm.SetTags(c.LoggerCfg.tags)
- if err != nil {
- // not expected since validation should be performed before
- return logger.Prm{}, errors.New("incorrect allowed tags format: " + c.LoggerCfg.destination)
+func (c *cfg) loggerPrm() (*logger.Prm, error) {
+ // check if it has been inited before
+ if c.dynamicConfiguration.logger == nil {
+ c.dynamicConfiguration.logger = new(logger.Prm)
}
- return prm, nil
+ // (re)init read configuration
+ err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
+ if err != nil {
+ // not expected since validation should be performed before
+ panic(fmt.Sprintf("incorrect log level format: %s", c.LoggerCfg.level))
+ }
+
+ return c.dynamicConfiguration.logger, nil
}
func (c *cfg) LocalAddress() network.AddressGroup {
@@ -1095,8 +903,8 @@ func (c *cfg) LocalAddress() network.AddressGroup {
func initLocalStorage(ctx context.Context, c *cfg) {
ls := engine.New(c.engineOpts()...)
- addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
- ls.HandleNewEpoch(ctx, ev.(netmap2.NewEpoch).EpochNumber())
+ addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
+ ls.HandleNewEpoch(ev.(netmap2.NewEpoch).EpochNumber())
})
// allocate memory for the service;
@@ -1104,15 +912,13 @@ func initLocalStorage(ctx context.Context, c *cfg) {
c.cfgObject.getSvc = new(getsvc.Service)
var shardsAttached int
- for _, optsWithMeta := range c.shardOpts(ctx) {
- id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts,
- shard.WithTombstoneSource(c.createTombstoneSource()),
- shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
+ for _, optsWithMeta := range c.shardOpts() {
+ id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts, shard.WithTombstoneSource(c.createTombstoneSource()))...)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
} else {
shardsAttached++
- c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
+ c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
}
}
if shardsAttached == 0 {
@@ -1122,50 +928,15 @@ func initLocalStorage(ctx context.Context, c *cfg) {
c.cfgObject.cfgLocalStorage.localStorage = ls
c.onShutdown(func() {
- c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
+ c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
- err := ls.Close(context.WithoutCancel(ctx))
+ err := ls.Close(context.Background())
if err != nil {
- c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
- zap.Error(err),
+ c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
+ zap.String("error", err.Error()),
)
} else {
- c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
- }
- })
-}
-
-func initAccessPolicyEngine(ctx context.Context, c *cfg) {
- var localOverrideDB chainbase.LocalOverrideDatabase
- if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" {
- c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
- localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
- } else {
- localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
- chainbase.WithPath(nodeconfig.PersistentPolicyRules(c.appCfg).Path()),
- chainbase.WithPerm(nodeconfig.PersistentPolicyRules(c.appCfg).Perm()),
- chainbase.WithNoSync(nodeconfig.PersistentPolicyRules(c.appCfg).NoSync()),
- )
- }
-
- var morphRuleStorage policy_engine.MorphRuleChainStorageReader
- morphRuleStorage = policy_client.NewContractStorage(
- client.NewSwitchRPCGuardedActor(c.cfgMorph.client),
- c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
-
- cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
- if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
- morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
- }
-
- ape := newAccessPolicyEngine(morphRuleStorage, localOverrideDB)
- c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine = ape
-
- c.onShutdown(func() {
- if err := ape.LocalOverrideDatabaseCore().Close(); err != nil {
- c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure,
- zap.Error(err),
- )
+ c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
}
})
}
@@ -1173,77 +944,86 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
var err error
- replicatorPoolSize := replicatorconfig.PoolSize(cfg)
- pool.replication, err = ants.NewPool(replicatorPoolSize)
+ optNonBlocking := ants.WithNonblocking(true)
+
+ pool.putRemoteCapacity = objectconfig.Put(cfg).PoolSizeRemote()
+ pool.putRemote, err = ants.NewPool(pool.putRemoteCapacity, optNonBlocking)
+ fatalOnErr(err)
+
+ pool.putLocalCapacity = objectconfig.Put(cfg).PoolSizeLocal()
+ pool.putLocal, err = ants.NewPool(pool.putLocalCapacity, optNonBlocking)
+ fatalOnErr(err)
+
+ pool.replicatorPoolSize = replicatorconfig.PoolSize(cfg)
+ if pool.replicatorPoolSize <= 0 {
+ pool.replicatorPoolSize = pool.putRemoteCapacity
+ }
+
+ pool.replication, err = ants.NewPool(pool.replicatorPoolSize)
fatalOnErr(err)
return pool
}
-func (c *cfg) LocalNodeInfo() *netmap.NodeInfo {
- var res netmap.NodeInfo
+func (c *cfg) LocalNodeInfo() (*netmapV2.NodeInfo, error) {
+ var res netmapV2.NodeInfo
+
ni, ok := c.cfgNetmap.state.getNodeInfo()
if ok {
- res = ni
+ ni.WriteToV2(&res)
} else {
- res = c.cfgNodeInfo.localInfo
+ c.cfgNodeInfo.localInfo.WriteToV2(&res)
}
- return &res
+
+ return &res, nil
}
-// setContractNodeInfo rewrites local node info from the FrostFS network map.
+// handleLocalNodeInfo rewrites local node info from the FrostFS network map.
// Called with nil when storage node is outside the FrostFS network map
// (before entering the network and after leaving it).
-func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
+func (c *cfg) handleLocalNodeInfo(ni *netmap.NodeInfo) {
c.cfgNetmap.state.setNodeInfo(ni)
}
-func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
- ni, err := c.netmapLocalNodeState(ctx, epoch)
- if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
- zap.Uint64("epoch", epoch),
- zap.Error(err))
- return
- }
-
- c.setContractNodeInfo(ni)
-}
-
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration.
// The state is set using the provided setter which MUST NOT be nil.
-func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
+func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error {
ni := c.cfgNodeInfo.localInfo
- ni.SetStatus(state)
+ stateSetter(&ni)
prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni)
- return c.cfgNetmap.wrapper.AddPeer(ctx, prm)
+ return c.cfgNetmap.wrapper.AddPeer(prm)
}
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
-func bootstrapOnline(ctx context.Context, c *cfg) error {
- return c.bootstrapWithState(ctx, netmap.Online)
+func bootstrapOnline(c *cfg) error {
+ return c.bootstrapWithState((*netmap.NodeInfo).SetOnline)
}
// bootstrap calls bootstrapWithState with:
// - "maintenance" state if maintenance is in progress on the current node
// - "online", otherwise
-func (c *cfg) bootstrap(ctx context.Context) error {
+func (c *cfg) bootstrap() error {
// switch to online except when under maintenance
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
- c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
- return c.bootstrapWithState(ctx, netmap.Maintenance)
+ c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
+ return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance)
}
- c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
+ c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
zap.Stringer("previous", st),
)
- return bootstrapOnline(ctx, c)
+ return bootstrapOnline(c)
+}
+
+// needBootstrap checks if local node should be registered in network on bootup.
+func (c *cfg) needBootstrap() bool {
+ return c.cfgNetmap.needBootstrap
}
type dCmp struct {
@@ -1253,148 +1033,68 @@ type dCmp struct {
func (c *cfg) signalWatcher(ctx context.Context) {
ch := make(chan os.Signal, 1)
- signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
-
- sighupCh := make(chan os.Signal, 1)
- signal.Notify(sighupCh, syscall.SIGHUP)
+ signal.Notify(ch, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
for {
select {
- // signals causing application to shut down should have priority over
- // reconfiguration signal
- case <-ch:
- c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
-
- c.shutdown(ctx)
-
- c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
- return
- case err := <-c.internalErr: // internal application error
- c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
- zap.String("message", err.Error()))
-
- c.shutdown(ctx)
-
- c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
- return
- default:
- // block until any signal is receieved
- select {
- case <-sighupCh:
+ case sig := <-ch:
+ switch sig {
+ case syscall.SIGHUP:
c.reloadConfig(ctx)
- case <-ch:
- c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ case syscall.SIGTERM, syscall.SIGINT:
+ c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+ // TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING)
- c.shutdown(ctx)
+ c.shutdown()
- c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
- return
- case err := <-c.internalErr: // internal application error
- c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
- zap.String("message", err.Error()))
-
- c.shutdown(ctx)
-
- c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
+ c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
}
+ case err := <-c.internalErr: // internal application error
+ c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+ zap.String("message", err.Error()))
+
+ c.shutdown()
+
+ c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+ return
}
}
}
func (c *cfg) reloadConfig(ctx context.Context) {
- c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+ c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
- if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
- c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
- return
- }
- defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
-
- err := c.reloadAppConfig()
+ err := c.readConfig(c.appCfg)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
return
}
// all the components are expected to support
// Logger's dynamic reconfiguration approach
-
- components := c.getComponents(ctx)
-
- // Object
- c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
-
- // Storage Engine
-
- var rcfg engine.ReConfiguration
- for _, optsWithID := range c.shardOpts(ctx) {
- rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts,
- shard.WithTombstoneSource(c.createTombstoneSource()),
- shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)),
- ))
- }
-
- err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
- if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
- return
- }
-
- for _, component := range components {
- err = component.reloadFunc()
- if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying,
- zap.String("component", component.name),
- zap.Error(err))
- }
- }
-
- if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
- c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
- return
- }
-
- c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
-}
-
-func (c *cfg) getComponents(ctx context.Context) []dCmp {
var components []dCmp
- components = append(components, dCmp{"logger", func() error {
- prm, err := c.loggerPrm()
- if err != nil {
- return err
- }
- logger.UpdateLevelForTags(prm)
- return nil
- }})
+ // Logger
+
+ logPrm, err := c.loggerPrm()
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
+ return
+ }
+
+ components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
- setRuntimeParameters(ctx, c)
+ setRuntimeParameters(c)
return nil
}})
- components = append(components, dCmp{"audit", func() error {
- c.audit.Store(audit.Enabled(c.appCfg))
- return nil
- }})
- components = append(components, dCmp{"pools", c.reloadPools})
components = append(components, dCmp{"tracing", func() error {
- traceConfig, err := tracingconfig.ToTracingConfig(c.appCfg)
- if err != nil {
- return err
- }
- updated, err := tracing.Setup(ctx, *traceConfig)
+ updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
if updated {
- c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated)
+ c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
}
return err
}})
- if c.treeService != nil {
- components = append(components, dCmp{"tree", func() error {
- c.treeService.ReloadAuthorizedKeys(treeconfig.Tree(c.appCfg).AuthorizedKeys())
- return nil
- }})
- }
if cmp, updated := metricsComponent(c); updated {
if cmp.enabled {
cmp.preReload = enableMetricsSvc
@@ -1407,32 +1107,29 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp {
components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
}
- components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
+ // Storage Engine
- return components
-}
-
-func (c *cfg) reloadPools() error {
- newSize := replicatorconfig.PoolSize(c.appCfg)
- c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")
-
- return nil
-}
-
-func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) {
- oldSize := p.Cap()
- if oldSize != newSize {
- c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
- zap.Int("old", oldSize), zap.Int("new", newSize))
- p.Tune(newSize)
+ var rcfg engine.ReConfiguration
+ for _, optsWithID := range c.shardOpts() {
+ rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource())))
}
-}
-func (c *cfg) reloadAppConfig() error {
- unlock := c.LockAppConfigExclusive()
- defer unlock()
+ err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
+ return
+ }
- return c.readConfig(c.appCfg)
+ for _, component := range components {
+ err = component.reloadFunc()
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
+ zap.String("component", component.name),
+ zap.Error(err))
+ }
+ }
+
+ c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
@@ -1447,34 +1144,13 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
return tombstoneSource
}
-func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
- return container.NewInfoProvider(func() (container.Source, error) {
- c.initMorphComponents(ctx)
- cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
- if err != nil {
- return nil, err
- }
- return containerClient.AsContainerSource(cc), nil
- })
-}
-
-func (c *cfg) shutdown(ctx context.Context) {
- old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
- if old == control.HealthStatus_SHUTTING_DOWN {
- c.log.Info(ctx, logs.FrostFSNodeShutdownSkip)
- return
- }
- if old == control.HealthStatus_STARTING {
- c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady)
- }
+func (c *cfg) shutdown() {
+ c.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)
c.ctxCancel()
- close(c.done)
+ c.done <- struct{}{}
for i := range c.closers {
c.closers[len(c.closers)-1-i].fn()
}
-
- if err := sdnotify.ClearStatus(); err != nil {
- c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
- }
+ close(c.internalErr)
}
diff --git a/cmd/frostfs-node/config/apiclient/config_test.go b/cmd/frostfs-node/config/apiclient/config_test.go
index cdfa5c401..f4f09d4ff 100644
--- a/cmd/frostfs-node/config/apiclient/config_test.go
+++ b/cmd/frostfs-node/config/apiclient/config_test.go
@@ -22,7 +22,7 @@ func TestApiclientSection(t *testing.T) {
const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
require.Equal(t, 15*time.Second, apiclientconfig.DialTimeout(c))
require.Equal(t, 20*time.Second, apiclientconfig.StreamTimeout(c))
require.Equal(t, 30*time.Second, apiclientconfig.ReconnectTimeout(c))
diff --git a/cmd/frostfs-node/config/audit/config.go b/cmd/frostfs-node/config/audit/config.go
deleted file mode 100644
index 8f728c850..000000000
--- a/cmd/frostfs-node/config/audit/config.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package audit
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-
-const (
- subsection = "audit"
-)
-
-// Enabled returns the value of "enabled" config parameter from "audit" section.
-func Enabled(c *config.Config) bool {
- return config.BoolSafe(c.Sub(subsection), "enabled")
-}
diff --git a/cmd/frostfs-node/config/audit/config_test.go b/cmd/frostfs-node/config/audit/config_test.go
deleted file mode 100644
index 7731cc8e6..000000000
--- a/cmd/frostfs-node/config/audit/config_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package audit
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestAuditSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- empty := configtest.EmptyConfig()
- require.Equal(t, false, Enabled(empty))
- })
-
- const path = "../../../../config/example/node"
-
- fileConfigTest := func(c *config.Config) {
- require.Equal(t, true, Enabled(c))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
-}
diff --git a/cmd/frostfs-node/config/calls.go b/cmd/frostfs-node/config/calls.go
index c40bf3620..36e53ea7c 100644
--- a/cmd/frostfs-node/config/calls.go
+++ b/cmd/frostfs-node/config/calls.go
@@ -1,7 +1,6 @@
package config
import (
- "slices"
"strings"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@@ -53,5 +52,6 @@ func (x *Config) Value(name string) any {
// It supports only one level of nesting and is intended to be used
// to provide default values.
func (x *Config) SetDefault(from *Config) {
- x.defaultPath = slices.Clone(from.path)
+ x.defaultPath = make([]string, len(from.path))
+ copy(x.defaultPath, from.path)
}
diff --git a/cmd/frostfs-node/config/calls_test.go b/cmd/frostfs-node/config/calls_test.go
index bc149eb7d..68bf1c679 100644
--- a/cmd/frostfs-node/config/calls_test.go
+++ b/cmd/frostfs-node/config/calls_test.go
@@ -1,6 +1,7 @@
package config_test
import (
+ "os"
"strings"
"testing"
@@ -37,7 +38,8 @@ func TestConfigEnv(t *testing.T) {
envName := strings.ToUpper(
strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator))
- t.Setenv(envName, value)
+ err := os.Setenv(envName, value)
+ require.NoError(t, err)
c := configtest.EmptyConfig()
diff --git a/cmd/frostfs-node/config/cast.go b/cmd/frostfs-node/config/cast.go
index beec09a9e..9036c3ab0 100644
--- a/cmd/frostfs-node/config/cast.go
+++ b/cmd/frostfs-node/config/cast.go
@@ -16,7 +16,7 @@ func panicOnErr(err error) {
}
// StringSlice reads a configuration value
-// from c by name and casts it to []string.
+// from c by name and casts it to a []string.
//
// Panics if the value can not be casted.
func StringSlice(c *Config, name string) []string {
@@ -27,7 +27,7 @@ func StringSlice(c *Config, name string) []string {
}
// StringSliceSafe reads a configuration value
-// from c by name and casts it to []string.
+// from c by name and casts it to a []string.
//
// Returns nil if the value can not be casted.
func StringSliceSafe(c *Config, name string) []string {
@@ -35,7 +35,7 @@ func StringSliceSafe(c *Config, name string) []string {
}
// String reads a configuration value
-// from c by name and casts it to string.
+// from c by name and casts it to a string.
//
// Panics if the value can not be casted.
func String(c *Config, name string) string {
@@ -46,7 +46,7 @@ func String(c *Config, name string) string {
}
// StringSafe reads a configuration value
-// from c by name and casts it to string.
+// from c by name and casts it to a string.
//
// Returns "" if the value can not be casted.
func StringSafe(c *Config, name string) string {
@@ -223,15 +223,3 @@ func parseSizeInBytes(sizeStr string) uint64 {
size := cast.ToFloat64(sizeStr)
return safeMul(size, multiplier)
}
-
-// FloatOrDefault reads a configuration value
-// from c by name and casts it to float64.
-//
-// Returns defaultValue if the value can not be casted.
-func FloatOrDefault(c *Config, name string, defaultValue float64) float64 {
- v, err := cast.ToFloat64E(c.Value(name))
- if err != nil {
- return defaultValue
- }
- return v
-}
diff --git a/cmd/frostfs-node/config/config.go b/cmd/frostfs-node/config/config.go
index d74e820ac..77e34d613 100644
--- a/cmd/frostfs-node/config/config.go
+++ b/cmd/frostfs-node/config/config.go
@@ -38,6 +38,7 @@ func New(configFile, configDir, envPrefix string) *Config {
configViper.WithConfigFile(configFile),
configViper.WithConfigDir(configDir),
configViper.WithEnvPrefix(envPrefix))
+
if err != nil {
panic(err)
}
diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go
index ee9d4268b..ede15a522 100644
--- a/cmd/frostfs-node/config/configdir_test.go
+++ b/cmd/frostfs-node/config/configdir_test.go
@@ -12,10 +12,13 @@ import (
func TestConfigDir(t *testing.T) {
dir := t.TempDir()
- cfgFileName := path.Join(dir, "cfg_01.yml")
+ cfgFileName0 := path.Join(dir, "cfg_00.json")
+ cfgFileName1 := path.Join(dir, "cfg_01.yml")
- require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777))
+ require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0777))
+ require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0777))
c := New("", dir, "")
require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
+ require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
}
diff --git a/cmd/frostfs-node/config/container/container.go b/cmd/frostfs-node/config/container/container.go
deleted file mode 100644
index 1cd64a6f8..000000000
--- a/cmd/frostfs-node/config/container/container.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package containerconfig
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-
-const (
- subsection = "container"
- listStreamSubsection = "list_stream"
-
- // ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
- ContainerBatchSizeDefault = 1000
-)
-
-// ContainerBatchSize returns the value of "batch_size" config parameter
-// from "list_stream" subsection of "container" section.
-//
-// Returns ContainerBatchSizeDefault if the value is missing or if
-// the value is not positive integer.
-func ContainerBatchSize(c *config.Config) uint32 {
- if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
- return ContainerBatchSizeDefault
- }
- size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
- if size == 0 {
- return ContainerBatchSizeDefault
- }
- return size
-}
diff --git a/cmd/frostfs-node/config/container/container_test.go b/cmd/frostfs-node/config/container/container_test.go
deleted file mode 100644
index 744cd3295..000000000
--- a/cmd/frostfs-node/config/container/container_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package containerconfig_test
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestContainerSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- empty := configtest.EmptyConfig()
- require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
- })
-
- const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
- require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
-}
diff --git a/cmd/frostfs-node/config/contracts/config.go b/cmd/frostfs-node/config/contracts/config.go
index df0c0b958..c5f14f3ca 100644
--- a/cmd/frostfs-node/config/contracts/config.go
+++ b/cmd/frostfs-node/config/contracts/config.go
@@ -38,10 +38,6 @@ func Container(c *config.Config) util.Uint160 {
return contractAddress(c, "container")
}
-func FrostfsID(c *config.Config) util.Uint160 {
- return contractAddress(c, "frostfsid")
-}
-
// Proxy returnsthe value of "proxy" config parameter
// from "contracts" section.
//
diff --git a/cmd/frostfs-node/config/contracts/config_test.go b/cmd/frostfs-node/config/contracts/config_test.go
index c85a625c5..d816ea1e4 100644
--- a/cmd/frostfs-node/config/contracts/config_test.go
+++ b/cmd/frostfs-node/config/contracts/config_test.go
@@ -35,7 +35,7 @@ func TestContractsSection(t *testing.T) {
expProxy, err := util.Uint160DecodeStringLE("ad7c6b55b737b696e5c82c85445040964a03e97f")
require.NoError(t, err)
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
balance := contractsconfig.Balance(c)
container := contractsconfig.Container(c)
netmap := contractsconfig.Netmap(c)
diff --git a/cmd/frostfs-node/config/control/config_test.go b/cmd/frostfs-node/config/control/config_test.go
index f702d83ae..1aed229e2 100644
--- a/cmd/frostfs-node/config/control/config_test.go
+++ b/cmd/frostfs-node/config/control/config_test.go
@@ -24,7 +24,7 @@ func TestControlSection(t *testing.T) {
pubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
pubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
require.Equal(t, pubs, controlconfig.AuthorizedKeys(c))
require.Equal(t, "localhost:8090", controlconfig.GRPC(c).Endpoint())
}
diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go
index 7994e7809..c944d1c58 100644
--- a/cmd/frostfs-node/config/engine/config.go
+++ b/cmd/frostfs-node/config/engine/config.go
@@ -11,6 +11,10 @@ import (
const (
subsection = "storage"
+
+ // ShardPoolSizeDefault is a default value of routine pool size per-shard to
+ // process object PUT operations in a storage engine.
+ ShardPoolSizeDefault = 20
)
// ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@@ -37,10 +41,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
c.Sub(si),
)
- if sc.Mode() == mode.Disabled {
- continue
- }
-
// Path for the blobstor can't be present in the default section, because different shards
// must have different paths, so if it is missing, the shard is not here.
// At the same time checking for "blobstor" section doesn't work proper
@@ -50,6 +50,10 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
}
(*config.Config)(sc).SetDefault(def)
+ if sc.Mode() == mode.Disabled {
+ continue
+ }
+
if err := f(sc); err != nil {
return err
}
@@ -61,6 +65,18 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
return nil
}
+// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section.
+//
+// Returns ShardPoolSizeDefault if the value is not a positive number.
+func ShardPoolSize(c *config.Config) uint32 {
+ v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size")
+ if v > 0 {
+ return v
+ }
+
+ return ShardPoolSizeDefault
+}
+
// ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
//
// Returns 0 if the the value is missing.
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index 401c54edc..b8e95db6d 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -12,30 +12,11 @@ import (
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
- writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
)
-func TestIterateShards(t *testing.T) {
- fileConfigTest := func(c *config.Config) {
- var res []string
- require.NoError(t,
- engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
- res = append(res, sc.Metabase().Path())
- return nil
- }))
- require.Equal(t, []string{"abc", "xyz"}, res)
- }
-
- const cfgDir = "./testdata/shards"
- configtest.ForEachFileType(cfgDir, fileConfigTest)
- configtest.ForEnvFileType(t, cfgDir, fileConfigTest)
-}
-
func TestEngineSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
@@ -55,15 +36,17 @@ func TestEngineSection(t *testing.T) {
require.False(t, handlerCalled)
require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
+ require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
})
const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
num := 0
require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
+ require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))
err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
defer func() {
@@ -76,7 +59,6 @@ func TestEngineSection(t *testing.T) {
ss := blob.Storages()
pl := sc.Pilorama()
gc := sc.GC()
- limits := sc.Limits()
switch num {
case 0:
@@ -90,39 +72,32 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, wc.NoSync())
require.Equal(t, "tmp/0/cache", wc.Path())
+ require.EqualValues(t, 16384, wc.SmallObjectSize())
require.EqualValues(t, 134217728, wc.MaxObjectSize())
- require.EqualValues(t, 30, wc.WorkerCount())
+ require.EqualValues(t, 30, wc.WorkersNumber())
require.EqualValues(t, 3221225472, wc.SizeLimit())
- require.EqualValues(t, 49, wc.CountLimit())
- require.EqualValues(t, uint64(100), wc.MaxFlushingObjectsSize())
require.Equal(t, "tmp/0/meta", meta.Path())
- require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm())
+ require.Equal(t, fs.FileMode(0644), meta.BoltDB().Perm())
require.Equal(t, 100, meta.BoltDB().MaxBatchSize())
require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay())
- require.Equal(t, true, sc.Compression().Enabled)
- require.Equal(t, compression.LevelFastest, sc.Compression().Level)
- require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes)
- require.Equal(t, true, sc.Compression().EstimateCompressibility)
- require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold)
+ require.Equal(t, true, sc.Compress())
+ require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes())
require.EqualValues(t, 102400, sc.SmallSizeLimit())
require.Equal(t, 2, len(ss))
blz := blobovniczaconfig.From((*config.Config)(ss[0]))
require.Equal(t, "tmp/0/blob/blobovnicza", ss[0].Path())
- require.EqualValues(t, 0o644, blz.BoltDB().Perm())
+ require.EqualValues(t, 0644, blz.BoltDB().Perm())
require.EqualValues(t, 4194304, blz.Size())
require.EqualValues(t, 1, blz.ShallowDepth())
require.EqualValues(t, 4, blz.ShallowWidth())
require.EqualValues(t, 50, blz.OpenedCacheSize())
- require.EqualValues(t, time.Minute, blz.OpenedCacheTTL())
- require.EqualValues(t, 30*time.Second, blz.OpenedCacheExpInterval())
- require.EqualValues(t, 10, blz.InitWorkerCount())
- require.EqualValues(t, 30*time.Second, blz.RebuildDropTimeout())
+ require.EqualValues(t, 10, blz.LeafWidth())
require.Equal(t, "tmp/0/blob", ss[1].Path())
- require.EqualValues(t, 0o644, ss[1].Perm())
+ require.EqualValues(t, 0644, ss[1].Perm())
fst := fstreeconfig.From((*config.Config)(ss[1]))
require.EqualValues(t, 5, fst.Depth())
@@ -131,94 +106,13 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 150, gc.RemoverBatchSize())
require.Equal(t, 2*time.Minute, gc.RemoverSleepInterval())
require.Equal(t, 1500, gc.ExpiredCollectorBatchSize())
- require.Equal(t, 15, gc.ExpiredCollectorWorkerCount())
+ require.Equal(t, 15, gc.ExpiredCollectorWorkersCount())
require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode())
- require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
-
- readLimits := limits.ToConfig().Read
- writeLimits := limits.ToConfig().Write
- require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
- require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
- require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
- require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
- require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
- require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
- require.ElementsMatch(t, readLimits.Tags,
- []qos.IOTagConfig{
- {
- Tag: "internal",
- Weight: toPtr(20),
- ReservedOps: toPtr(1000),
- LimitOps: toPtr(0),
- },
- {
- Tag: "client",
- Weight: toPtr(70),
- ReservedOps: toPtr(10000),
- },
- {
- Tag: "background",
- Weight: toPtr(5),
- LimitOps: toPtr(10000),
- ReservedOps: toPtr(0),
- },
- {
- Tag: "writecache",
- Weight: toPtr(5),
- LimitOps: toPtr(25000),
- },
- {
- Tag: "policer",
- Weight: toPtr(5),
- LimitOps: toPtr(25000),
- Prohibited: true,
- },
- {
- Tag: "treesync",
- Weight: toPtr(5),
- LimitOps: toPtr(25),
- },
- })
- require.ElementsMatch(t, writeLimits.Tags,
- []qos.IOTagConfig{
- {
- Tag: "internal",
- Weight: toPtr(200),
- ReservedOps: toPtr(100),
- LimitOps: toPtr(0),
- },
- {
- Tag: "client",
- Weight: toPtr(700),
- ReservedOps: toPtr(1000),
- },
- {
- Tag: "background",
- Weight: toPtr(50),
- LimitOps: toPtr(1000),
- ReservedOps: toPtr(0),
- },
- {
- Tag: "writecache",
- Weight: toPtr(50),
- LimitOps: toPtr(2500),
- },
- {
- Tag: "policer",
- Weight: toPtr(50),
- LimitOps: toPtr(2500),
- },
- {
- Tag: "treesync",
- Weight: toPtr(50),
- LimitOps: toPtr(100),
- },
- })
case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
- require.Equal(t, fs.FileMode(0o644), pl.Perm())
+ require.Equal(t, fs.FileMode(0644), pl.Perm())
require.True(t, pl.NoSync())
require.Equal(t, 5*time.Millisecond, pl.MaxBatchDelay())
require.Equal(t, 100, pl.MaxBatchSize())
@@ -227,20 +121,18 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, wc.NoSync())
require.Equal(t, "tmp/1/cache", wc.Path())
+ require.EqualValues(t, 16384, wc.SmallObjectSize())
require.EqualValues(t, 134217728, wc.MaxObjectSize())
- require.EqualValues(t, 30, wc.WorkerCount())
+ require.EqualValues(t, 30, wc.WorkersNumber())
require.EqualValues(t, 4294967296, wc.SizeLimit())
- require.EqualValues(t, writecacheconfig.CountLimitDefault, wc.CountLimit())
- require.EqualValues(t, writecacheconfig.MaxFlushingObjectsSizeDefault, wc.MaxFlushingObjectsSize())
require.Equal(t, "tmp/1/meta", meta.Path())
- require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm())
+ require.Equal(t, fs.FileMode(0644), meta.BoltDB().Perm())
require.Equal(t, 200, meta.BoltDB().MaxBatchSize())
require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay())
- require.Equal(t, false, sc.Compression().Enabled)
- require.Equal(t, compression.LevelDefault, sc.Compression().Level)
- require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes)
+ require.Equal(t, false, sc.Compress())
+ require.Equal(t, []string(nil), sc.UncompressableContentTypes())
require.EqualValues(t, 102400, sc.SmallSizeLimit())
require.Equal(t, 2, len(ss))
@@ -251,13 +143,10 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 1, blz.ShallowDepth())
require.EqualValues(t, 4, blz.ShallowWidth())
require.EqualValues(t, 50, blz.OpenedCacheSize())
- require.EqualValues(t, 5*time.Minute, blz.OpenedCacheTTL())
- require.EqualValues(t, 15*time.Second, blz.OpenedCacheExpInterval())
- require.EqualValues(t, blobovniczaconfig.InitWorkerCountDefault, blz.InitWorkerCount())
- require.EqualValues(t, blobovniczaconfig.RebuildDropTimeoutDefault, blz.RebuildDropTimeout())
+ require.EqualValues(t, 10, blz.LeafWidth())
require.Equal(t, "tmp/1/blob", ss[1].Path())
- require.EqualValues(t, 0o644, ss[1].Perm())
+ require.EqualValues(t, 0644, ss[1].Perm())
fst := fstreeconfig.From((*config.Config)(ss[1]))
require.EqualValues(t, 5, fst.Depth())
@@ -266,22 +155,10 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 200, gc.RemoverBatchSize())
require.Equal(t, 5*time.Minute, gc.RemoverSleepInterval())
require.Equal(t, gcconfig.ExpiredCollectorBatchSizeDefault, gc.ExpiredCollectorBatchSize())
- require.Equal(t, gcconfig.ExpiredCollectorWorkersCountDefault, gc.ExpiredCollectorWorkerCount())
+ require.Equal(t, gcconfig.ExpiredCollectorWorkersCountDefault, gc.ExpiredCollectorWorkersCount())
require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode())
- require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
-
- readLimits := limits.ToConfig().Read
- writeLimits := limits.ToConfig().Write
- require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout)
- require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps)
- require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps)
- require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout)
- require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps)
- require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps)
- require.Equal(t, 0, len(readLimits.Tags))
- require.Equal(t, 0, len(writeLimits.Tags))
}
return nil
})
@@ -295,7 +172,3 @@ func TestEngineSection(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}
-
-func toPtr(v float64) *float64 {
- return &v
-}
diff --git a/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza/config.go b/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza/config.go
index ac69c4c4f..a780ea927 100644
--- a/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza/config.go
+++ b/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza/config.go
@@ -1,8 +1,6 @@
package blobovniczaconfig
import (
- "time"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
boltdbconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/boltdb"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
@@ -24,18 +22,6 @@ const (
// OpenedCacheSizeDefault is a default cache size of opened Blobovnicza's.
OpenedCacheSizeDefault = 16
-
- // OpenedCacheTTLDefault is a default cache ttl of opened Blobovnicza's.
- OpenedCacheTTLDefault = 0 // means expiring is off
-
- // OpenedCacheExpIntervalDefault is a default cache cleanup interval for expired Blobovnicza's.
- OpenedCacheExpIntervalDefault = 15 * time.Second
-
- // InitWorkerCountDefault is a default workers count to initialize Blobovnicza's.
- InitWorkerCountDefault = 5
-
- // RebuildDropTimeoutDefault is a default timeout value to wait before drop single blobovnicza.
- RebuildDropTimeoutDefault = 10 * time.Second
)
// From wraps config section into Config.
@@ -112,69 +98,17 @@ func (x *Config) OpenedCacheSize() int {
return OpenedCacheSizeDefault
}
-// OpenedCacheTTL returns the value of "opened_cache_ttl" config parameter.
-//
-// Returns OpenedCacheTTLDefault if the value is not a positive number.
-func (x *Config) OpenedCacheTTL() time.Duration {
- d := config.DurationSafe(
- (*config.Config)(x),
- "opened_cache_ttl",
- )
-
- if d > 0 {
- return d
- }
-
- return OpenedCacheTTLDefault
-}
-
-// OpenedCacheExpInterval returns the value of "opened_cache_exp_interval" config parameter.
-//
-// Returns OpenedCacheExpIntervalDefault if the value is not a positive number.
-func (x *Config) OpenedCacheExpInterval() time.Duration {
- d := config.DurationSafe(
- (*config.Config)(x),
- "opened_cache_exp_interval",
- )
-
- if d > 0 {
- return d
- }
-
- return OpenedCacheExpIntervalDefault
-}
-
// BoltDB returns config instance for querying bolt db specific parameters.
func (x *Config) BoltDB() *boltdbconfig.Config {
return (*boltdbconfig.Config)(x)
}
-// InitWorkerCount returns the value of "init_worker_count" config parameter.
+// LeafWidth returns the value of "leaf_width" config parameter.
//
-// Returns InitWorkerCountDefault if the value is not a positive number.
-func (x *Config) InitWorkerCount() int {
- d := config.IntSafe(
+// Returns 0 if the value is not a positive number.
+func (x *Config) LeafWidth() uint64 {
+ return config.UintSafe(
(*config.Config)(x),
- "init_worker_count",
+ "leaf_width",
)
-
- if d > 0 {
- return int(d)
- }
-
- return InitWorkerCountDefault
-}
-
-// RebuildDropTimeout returns the value of "rebuild_drop_timeout" config parameter.
-//
-// Returns RebuildDropTimeoutDefault if the value is not defined or invalid.
-func (x *Config) RebuildDropTimeout() time.Duration {
- d := config.DurationSafe(
- (*config.Config)(x),
- "rebuild_drop_timeout",
- )
- if d > 0 {
- return d
- }
- return RebuildDropTimeoutDefault
}
diff --git a/cmd/frostfs-node/config/engine/shard/blobstor/storage/config.go b/cmd/frostfs-node/config/engine/shard/blobstor/storage/config.go
index e83c69de2..649e4980d 100644
--- a/cmd/frostfs-node/config/engine/shard/blobstor/storage/config.go
+++ b/cmd/frostfs-node/config/engine/shard/blobstor/storage/config.go
@@ -9,7 +9,7 @@ import (
type Config config.Config
// PermDefault are default permission bits for BlobStor data.
-const PermDefault = 0o660
+const PermDefault = 0660
func From(x *config.Config) *Config {
return (*Config)(x)
diff --git a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
index b564d36f8..83d4e45c5 100644
--- a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
+++ b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
@@ -13,7 +13,7 @@ type Config config.Config
const (
// PermDefault is a default permission bits for metabase file.
- PermDefault = 0o660
+ PermDefault = 0660
)
// Perm returns the value of "perm" config parameter as a fs.FileMode.
@@ -37,7 +37,10 @@ func (x *Config) Perm() fs.FileMode {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
- return max(d, 0)
+ if d < 0 {
+ d = 0
+ }
+ return d
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -45,7 +48,10 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
- return max(s, 0)
+ if s < 0 {
+ s = 0
+ }
+ return s
}
// NoSync returns the value of "no_sync" config parameter.
@@ -54,11 +60,3 @@ func (x *Config) MaxBatchSize() int {
func (x *Config) NoSync() bool {
return config.BoolSafe((*config.Config)(x), "no_sync")
}
-
-// PageSize returns the value of "page_size" config parameter.
-//
-// Returns 0 if the value is not a positive number.
-func (x *Config) PageSize() int {
- s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
- return max(s, 0)
-}
diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go
index d42646da7..1dc32fb86 100644
--- a/cmd/frostfs-node/config/engine/shard/config.go
+++ b/cmd/frostfs-node/config/engine/shard/config.go
@@ -1,14 +1,14 @@
package shardconfig
import (
+ "fmt"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
- limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
@@ -16,44 +16,31 @@ import (
// which provides access to Shard configurations.
type Config config.Config
-const (
- // SmallSizeLimitDefault is a default limit of small objects payload in bytes.
- SmallSizeLimitDefault = 1 << 20
- EstimateCompressibilityThresholdDefault = 0.1
- RefillMetabaseWorkersCountDefault = 500
-)
+// SmallSizeLimitDefault is a default limit of small objects payload in bytes.
+const SmallSizeLimitDefault = 1 << 20
// From wraps config section into Config.
func From(c *config.Config) *Config {
return (*Config)(c)
}
-func (x *Config) Compression() compression.Config {
- cc := (*config.Config)(x).Sub("compression")
- if cc == nil {
- return compression.Config{}
- }
- return compression.Config{
- Enabled: config.BoolSafe(cc, "enabled"),
- UncompressableContentTypes: config.StringSliceSafe(cc, "exclude_content_types"),
- Level: compression.Level(config.StringSafe(cc, "level")),
- EstimateCompressibility: config.BoolSafe(cc, "estimate_compressibility"),
- EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc),
- }
+// Compress returns the value of "compress" config parameter.
+//
+// Returns false if the value is not a valid bool.
+func (x *Config) Compress() bool {
+ return config.BoolSafe(
+ (*config.Config)(x),
+ "compress",
+ )
}
-// EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter.
+// UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter.
//
-// Returns EstimateCompressibilityThresholdDefault if the value is not defined, not valid float or not in range [0.0; 1.0].
-func estimateCompressibilityThreshold(c *config.Config) float64 {
- v := config.FloatOrDefault(
- c,
- "estimate_compressibility_threshold",
- EstimateCompressibilityThresholdDefault)
- if v < 0.0 || v > 1.0 {
- return EstimateCompressibilityThresholdDefault
- }
- return v
+// Returns nil if the value is missing or is invalid.
+func (x *Config) UncompressableContentTypes() []string {
+ return config.StringSliceSafe(
+ (*config.Config)(x),
+ "compression_exclude_content_types")
}
// SmallSizeLimit returns the value of "small_object_size" config parameter.
@@ -112,14 +99,6 @@ func (x *Config) GC() *gcconfig.Config {
)
}
-// Limits returns "limits" subsection as a limitsconfig.Config.
-func (x *Config) Limits() *limitsconfig.Config {
- return limitsconfig.From(
- (*config.Config)(x).
- Sub("limits"),
- )
-}
-
// RefillMetabase returns the value of "resync_metabase" config parameter.
//
// Returns false if the value is not a valid bool.
@@ -130,20 +109,6 @@ func (x *Config) RefillMetabase() bool {
)
}
-// RefillMetabaseWorkersCount returns the value of "resync_metabase_worker_count" config parameter.
-//
-// Returns RefillMetabaseWorkersCountDefault if the value is not a positive number.
-func (x *Config) RefillMetabaseWorkersCount() int {
- v := config.IntSafe(
- (*config.Config)(x),
- "resync_metabase_worker_count",
- )
- if v > 0 {
- return int(v)
- }
- return RefillMetabaseWorkersCountDefault
-}
-
// Mode return the value of "mode" config parameter.
//
// Panics if read the value is not one of predefined
@@ -166,7 +131,7 @@ func (x *Config) Mode() (m mode.Mode) {
case "disabled":
m = mode.Disabled
default:
- panic("unknown shard mode: " + s)
+ panic(fmt.Sprintf("unknown shard mode: %s", s))
}
return
diff --git a/cmd/frostfs-node/config/engine/shard/gc/config.go b/cmd/frostfs-node/config/engine/shard/gc/config.go
index 8cb90d3ff..0500697c8 100644
--- a/cmd/frostfs-node/config/engine/shard/gc/config.go
+++ b/cmd/frostfs-node/config/engine/shard/gc/config.go
@@ -63,14 +63,14 @@ func (x *Config) RemoverSleepInterval() time.Duration {
return RemoverSleepIntervalDefault
}
-// ExpiredCollectorWorkerCount returns the value of "expired_collector_worker_count"
+// ExpiredCollectorWorkersCount returns the value of "expired_collector_workers_count"
// config parameter.
//
// Returns ExpiredCollectorWorkersCountDefault if the value is not a positive number.
-func (x *Config) ExpiredCollectorWorkerCount() int {
+func (x *Config) ExpiredCollectorWorkersCount() int {
s := config.IntSafe(
(*config.Config)(x),
- "expired_collector_worker_count",
+ "expired_collector_workers_count",
)
if s > 0 {
diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go
deleted file mode 100644
index ccd1e0000..000000000
--- a/cmd/frostfs-node/config/engine/shard/limits/config.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package limits
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "github.com/spf13/cast"
-)
-
-// From wraps config section into Config.
-func From(c *config.Config) *Config {
- return (*Config)(c)
-}
-
-// Config is a wrapper over the config section
-// which provides access to Shard's limits configurations.
-type Config config.Config
-
-func (x *Config) ToConfig() qos.LimiterConfig {
- result := qos.LimiterConfig{
- Read: x.read(),
- Write: x.write(),
- }
- panicOnErr(result.Validate())
- return result
-}
-
-func (x *Config) read() qos.OpConfig {
- return x.parse("read")
-}
-
-func (x *Config) write() qos.OpConfig {
- return x.parse("write")
-}
-
-func (x *Config) parse(sub string) qos.OpConfig {
- c := (*config.Config)(x).Sub(sub)
- var result qos.OpConfig
-
- if s := config.Int(c, "max_waiting_ops"); s > 0 {
- result.MaxWaitingOps = s
- } else {
- result.MaxWaitingOps = qos.NoLimit
- }
-
- if s := config.Int(c, "max_running_ops"); s > 0 {
- result.MaxRunningOps = s
- } else {
- result.MaxRunningOps = qos.NoLimit
- }
-
- if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
- result.IdleTimeout = s
- } else {
- result.IdleTimeout = qos.DefaultIdleTimeout
- }
-
- result.Tags = tags(c)
-
- return result
-}
-
-func tags(c *config.Config) []qos.IOTagConfig {
- c = c.Sub("tags")
- var result []qos.IOTagConfig
- for i := 0; ; i++ {
- tag := config.String(c, strconv.Itoa(i)+".tag")
- if tag == "" {
- return result
- }
-
- var tagConfig qos.IOTagConfig
- tagConfig.Tag = tag
-
- v := c.Value(strconv.Itoa(i) + ".weight")
- if v != nil {
- w, err := cast.ToFloat64E(v)
- panicOnErr(err)
- tagConfig.Weight = &w
- }
-
- v = c.Value(strconv.Itoa(i) + ".limit_ops")
- if v != nil {
- l, err := cast.ToFloat64E(v)
- panicOnErr(err)
- tagConfig.LimitOps = &l
- }
-
- v = c.Value(strconv.Itoa(i) + ".reserved_ops")
- if v != nil {
- r, err := cast.ToFloat64E(v)
- panicOnErr(err)
- tagConfig.ReservedOps = &r
- }
-
- v = c.Value(strconv.Itoa(i) + ".prohibited")
- if v != nil {
- r, err := cast.ToBoolE(v)
- panicOnErr(err)
- tagConfig.Prohibited = r
- }
-
- result = append(result, tagConfig)
- }
-}
-
-func panicOnErr(err error) {
- if err != nil {
- panic(err)
- }
-}
diff --git a/cmd/frostfs-node/config/engine/shard/pilorama/config.go b/cmd/frostfs-node/config/engine/shard/pilorama/config.go
index 5d4e8f408..7ac0eca91 100644
--- a/cmd/frostfs-node/config/engine/shard/pilorama/config.go
+++ b/cmd/frostfs-node/config/engine/shard/pilorama/config.go
@@ -13,7 +13,7 @@ type Config config.Config
const (
// PermDefault is a default permission bits for metabase file.
- PermDefault = 0o660
+ PermDefault = 0660
)
// From wraps config section into Config.
@@ -52,7 +52,10 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
- return max(d, 0)
+ if d <= 0 {
+ d = 0
+ }
+ return d
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -60,5 +63,8 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
- return max(s, 0)
+ if s <= 0 {
+ s = 0
+ }
+ return s
}
diff --git a/cmd/frostfs-node/config/engine/shard/writecache/config.go b/cmd/frostfs-node/config/engine/shard/writecache/config.go
index 6fff0308b..504fe3ca2 100644
--- a/cmd/frostfs-node/config/engine/shard/writecache/config.go
+++ b/cmd/frostfs-node/config/engine/shard/writecache/config.go
@@ -1,7 +1,12 @@
package writecacheconfig
import (
+ "fmt"
+ "time"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ boltdbconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/boltdb"
+ writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
)
// Config is a wrapper over the config section
@@ -9,6 +14,9 @@ import (
type Config config.Config
const (
+ // SmallSizeDefault is a default size of small objects.
+ SmallSizeDefault = 32 << 10
+
// MaxSizeDefault is a default value of the object payload size limit.
MaxSizeDefault = 64 << 20
@@ -18,10 +26,8 @@ const (
// SizeLimitDefault is a default write-cache size limit.
SizeLimitDefault = 1 << 30
- // CountLimitDefault is a default write-cache count limit.
- CountLimitDefault = 0
-
- MaxFlushingObjectsSizeDefault = 128 << 20
+ // DefaultGCInterval is the default duration of the GC cycle interval.
+ DefaultGCInterval = 1 * time.Minute
)
// From wraps config section into Config.
@@ -36,6 +42,22 @@ func (x *Config) Enabled() bool {
return config.Bool((*config.Config)(x), "enabled")
}
+// Type returns the writecache implementation type to use.
+//
+// Panics if the type is not recognized.
+func (x *Config) Type() writecacheconfig.Type {
+ t := config.String((*config.Config)(x), "type")
+
+ switch t {
+ case "bbolt", "":
+ return writecacheconfig.TypeBBolt
+ case "badger":
+ return writecacheconfig.TypeBadger
+ }
+
+ panic(fmt.Sprintf("invalid writecache type: %q", t))
+}
+
// Path returns the value of "path" config parameter.
//
// Panics if the value is not a non-empty string.
@@ -52,6 +74,22 @@ func (x *Config) Path() string {
return p
}
+// SmallObjectSize returns the value of "small_object_size" config parameter.
+//
+// Returns SmallSizeDefault if the value is not a positive number.
+func (x *Config) SmallObjectSize() uint64 {
+ s := config.SizeInBytesSafe(
+ (*config.Config)(x),
+ "small_object_size",
+ )
+
+ if s > 0 {
+ return s
+ }
+
+ return SmallSizeDefault
+}
+
// MaxObjectSize returns the value of "max_object_size" config parameter.
//
// Returns MaxSizeDefault if the value is not a positive number.
@@ -68,13 +106,13 @@ func (x *Config) MaxObjectSize() uint64 {
return MaxSizeDefault
}
-// WorkerCount returns the value of "flush_worker_count" config parameter.
+// WorkersNumber returns the value of "workers_number" config parameter.
//
// Returns WorkersNumberDefault if the value is not a positive number.
-func (x *Config) WorkerCount() int {
+func (x *Config) WorkersNumber() int {
c := config.IntSafe(
(*config.Config)(x),
- "flush_worker_count",
+ "workers_number",
)
if c > 0 {
@@ -100,22 +138,6 @@ func (x *Config) SizeLimit() uint64 {
return SizeLimitDefault
}
-// CountLimit returns the value of "max_object_count" config parameter.
-//
-// Returns CountLimitDefault if the value is not a positive number.
-func (x *Config) CountLimit() uint64 {
- c := config.SizeInBytesSafe(
- (*config.Config)(x),
- "max_object_count",
- )
-
- if c > 0 {
- return c
- }
-
- return CountLimitDefault
-}
-
// NoSync returns the value of "no_sync" config parameter.
//
// Returns false if the value is not a boolean.
@@ -123,18 +145,20 @@ func (x *Config) NoSync() bool {
return config.BoolSafe((*config.Config)(x), "no_sync")
}
-// MaxFlushingObjectsSize returns the value of "max_flushing_objects_size" config parameter.
-//
-// Returns MaxFlushingObjectsSizeDefault if the value is not a positive number.
-func (x *Config) MaxFlushingObjectsSize() uint64 {
- s := config.SizeInBytesSafe(
- (*config.Config)(x),
- "max_flushing_objects_size",
- )
+// BoltDB returns config instance for querying bolt db specific parameters.
+func (x *Config) BoltDB() *boltdbconfig.Config {
+ return (*boltdbconfig.Config)(x)
+}
- if s > 0 {
- return s
+// GCInterval returns the value of "gc_interval" config parameter.
+//
+// Returns DefaultGCInterval if the value is not a positive duration.
+func (x *Config) GCInterval() time.Duration {
+ d := config.DurationSafe((*config.Config)(x), "gc_interval")
+
+ if d > 0 {
+ return d
}
- return MaxFlushingObjectsSizeDefault
+ return DefaultGCInterval
}
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.env b/cmd/frostfs-node/config/engine/testdata/shards.env
deleted file mode 100644
index 079789b0f..000000000
--- a/cmd/frostfs-node/config/engine/testdata/shards.env
+++ /dev/null
@@ -1,3 +0,0 @@
-FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc
-FROSTFS_STORAGE_SHARD_1_MODE=disabled
-FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.json b/cmd/frostfs-node/config/engine/testdata/shards.json
deleted file mode 100644
index b3d6abe85..000000000
--- a/cmd/frostfs-node/config/engine/testdata/shards.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "storage.shard": {
- "0": {
- "metabase.path": "abc"
- },
- "1": {
- "mode": "disabled"
- },
- "2": {
- "metabase.path": "xyz"
- }
- }
-}
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.yaml b/cmd/frostfs-node/config/engine/testdata/shards.yaml
deleted file mode 100644
index bbbba3af8..000000000
--- a/cmd/frostfs-node/config/engine/testdata/shards.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-storage.shard:
- 0:
- metabase.path: abc
- 1:
- mode: disabled
- 2:
- metabase.path: xyz
diff --git a/cmd/frostfs-node/config/grpc/config.go b/cmd/frostfs-node/config/grpc/config.go
index 37dd76426..c25d2e717 100644
--- a/cmd/frostfs-node/config/grpc/config.go
+++ b/cmd/frostfs-node/config/grpc/config.go
@@ -3,7 +3,6 @@ package grpcconfig
import (
"errors"
"strconv"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
)
@@ -110,17 +109,3 @@ func IterateEndpoints(c *config.Config, f func(*Config)) {
panic("no gRPC server configured")
}
}
-
-const DefaultReconnectInterval = time.Minute
-
-// ReconnectTimeout returns the value of "reconnect_interval" gRPC config parameter.
-//
-// Returns DefaultReconnectInterval if value is not defined or invalid.
-func ReconnectTimeout(c *config.Config) time.Duration {
- grpcConf := c.Sub("grpc")
- ri := config.DurationSafe(grpcConf, "reconnect_interval")
- if ri > 0 {
- return ri
- }
- return DefaultReconnectInterval
-}
diff --git a/cmd/frostfs-node/config/grpc/config_test.go b/cmd/frostfs-node/config/grpc/config_test.go
index 13ce4294e..1ada792ec 100644
--- a/cmd/frostfs-node/config/grpc/config_test.go
+++ b/cmd/frostfs-node/config/grpc/config_test.go
@@ -17,7 +17,7 @@ func TestGRPCSection(t *testing.T) {
const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
num := 0
IterateEndpoints(c, func(sc *Config) {
diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go
index 20f373184..78e4377a9 100644
--- a/cmd/frostfs-node/config/logger/config.go
+++ b/cmd/frostfs-node/config/logger/config.go
@@ -1,24 +1,12 @@
package loggerconfig
import (
- "os"
- "strconv"
- "time"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore/loki"
)
const (
// LevelDefault is a default logger level.
- LevelDefault = "info"
- DestinationDefault = logger.DestinationStdout
- subsection = "logger"
- lokiSubsection = "loki"
- AddressDefault = "localhost:3100"
- BatchEntriesNumberDefault = 100
- BatchWaitDefault = time.Second
+ LevelDefault = "info"
)
// Level returns the value of "level" config parameter
@@ -27,7 +15,7 @@ const (
// Returns LevelDefault if the value is not a non-empty string.
func Level(c *config.Config) string {
v := config.StringSafe(
- c.Sub(subsection),
+ c.Sub("logger"),
"level",
)
if v != "" {
@@ -36,83 +24,3 @@ func Level(c *config.Config) string {
return LevelDefault
}
-
-// Destination returns the value of "destination" config parameter
-// from "logger" section.
-//
-// Returns DestinationDefault if the value is not a non-empty string.
-func Destination(c *config.Config) string {
- v := config.StringSafe(
- c.Sub(subsection),
- "destination",
- )
- if v != "" {
- return v
- }
-
- return DestinationDefault
-}
-
-// Timestamp returns the value of "timestamp" config parameter
-// from "logger" section.
-//
-// Returns false if the value isn't specified.
-func Timestamp(c *config.Config) bool {
- return config.BoolSafe(c.Sub(subsection), "timestamp")
-}
-
-// Tags returns the value of "tags" config parameter from "logger" section.
-func Tags(c *config.Config) [][]string {
- var res [][]string
- sub := c.Sub(subsection).Sub("tags")
- for i := 0; ; i++ {
- s := sub.Sub(strconv.FormatInt(int64(i), 10))
- names := config.StringSafe(s, "names")
- if names == "" {
- break
- }
- res = append(res, []string{names, config.StringSafe(s, "level")})
- }
- return res
-}
-
-// ToLokiConfig extracts loki config.
-func ToLokiConfig(c *config.Config) loki.Config {
- hostname, _ := os.Hostname()
- return loki.Config{
- Enabled: config.BoolSafe(c.Sub(subsection).Sub(lokiSubsection), "enabled"),
- BatchWait: getBatchWait(c),
- BatchEntriesNumber: getBatchEntriesNumber(c),
- Endpoint: getEndpoint(c),
- Labels: map[string]string{
- "hostname": hostname,
- },
- }
-}
-
-func getBatchWait(c *config.Config) time.Duration {
- v := config.DurationSafe(c.Sub(subsection).Sub(lokiSubsection), "max_batch_delay")
- if v > 0 {
- return v
- }
-
- return BatchWaitDefault
-}
-
-func getBatchEntriesNumber(c *config.Config) int {
- v := config.IntSafe(c.Sub(subsection).Sub(lokiSubsection), "max_batch_size")
- if v > 0 {
- return int(v)
- }
-
- return BatchEntriesNumberDefault
-}
-
-func getEndpoint(c *config.Config) string {
- v := config.StringSafe(c.Sub(subsection).Sub(lokiSubsection), "endpoint")
- if v != "" {
- return v
- }
-
- return AddressDefault
-}
diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go
index 796ad529e..8d37bdb1b 100644
--- a/cmd/frostfs-node/config/logger/config_test.go
+++ b/cmd/frostfs-node/config/logger/config_test.go
@@ -11,20 +11,15 @@ import (
func TestLoggerSection_Level(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
- require.Equal(t, loggerconfig.LevelDefault, loggerconfig.Level(configtest.EmptyConfig()))
- require.Equal(t, loggerconfig.DestinationDefault, loggerconfig.Destination(configtest.EmptyConfig()))
- require.Equal(t, false, loggerconfig.Timestamp(configtest.EmptyConfig()))
+ v := loggerconfig.Level(configtest.EmptyConfig())
+ require.Equal(t, loggerconfig.LevelDefault, v)
})
const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
- require.Equal(t, "debug", loggerconfig.Level(c))
- require.Equal(t, "journald", loggerconfig.Destination(c))
- require.Equal(t, true, loggerconfig.Timestamp(c))
- tags := loggerconfig.Tags(c)
- require.Equal(t, "main, morph", tags[0][0])
- require.Equal(t, "debug", tags[0][1])
+ var fileConfigTest = func(c *config.Config) {
+ v := loggerconfig.Level(c)
+ require.Equal(t, "debug", v)
}
configtest.ForEachFileType(path, fileConfigTest)
diff --git a/cmd/frostfs-node/config/metrics/config_test.go b/cmd/frostfs-node/config/metrics/config_test.go
index c2a1b1fc4..4c03729ee 100644
--- a/cmd/frostfs-node/config/metrics/config_test.go
+++ b/cmd/frostfs-node/config/metrics/config_test.go
@@ -22,7 +22,7 @@ func TestMetricsSection(t *testing.T) {
const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
to := metricsconfig.ShutdownTimeout(c)
addr := metricsconfig.Address(c)
diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go
index a9f774d18..4ab608ef3 100644
--- a/cmd/frostfs-node/config/morph/config.go
+++ b/cmd/frostfs-node/config/morph/config.go
@@ -1,7 +1,7 @@
package morphconfig
import (
- "errors"
+ "fmt"
"strconv"
"time"
@@ -10,7 +10,8 @@ import (
)
const (
- subsection = "morph"
+ subsection = "morph"
+ notarySubsection = "notary"
// DialTimeoutDefault is a default dial timeout of morph chain client connection.
DialTimeoutDefault = 5 * time.Second
@@ -24,22 +25,8 @@ const (
// SwitchIntervalDefault is a default Neo RPCs switch interval.
SwitchIntervalDefault = 2 * time.Minute
-
- // APEChainCacheSizeDefault is a default value of APE chain cache.
- APEChainCacheSizeDefault = 10_000
-
- // FrostfsIDCacheSizeDefault is a default value of APE chain cache.
- FrostfsIDCacheSizeDefault = 10_000
-
- // ContainerCacheSizeDefault represents the default size for the container cache.
- ContainerCacheSizeDefault = 100
-
- // PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates.
- PollCandidatesTimeoutDefault = 20 * time.Second
)
-var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")
-
// RPCEndpoint returns list of the values of "rpc_endpoint" config parameter
// from "morph" section.
//
@@ -60,25 +47,14 @@ func RPCEndpoint(c *config.Config) []client.Endpoint {
priority = PriorityDefault
}
- var mtlsConfig *client.MTLSConfig
- rootCAs := config.StringSliceSafe(s, "trusted_ca_list")
- if len(rootCAs) != 0 {
- mtlsConfig = &client.MTLSConfig{
- TrustedCAList: rootCAs,
- KeyFile: config.StringSafe(s, "key"),
- CertFile: config.StringSafe(s, "certificate"),
- }
- }
-
es = append(es, client.Endpoint{
- Address: addr,
- Priority: priority,
- MTLSConfig: mtlsConfig,
+ Address: addr,
+ Priority: priority,
})
}
if len(es) == 0 {
- panic(errNoMorphEndpoints)
+ panic(fmt.Errorf("no morph chain RPC endpoints, see `morph.rpc_endpoint` section"))
}
return es
}
@@ -109,18 +85,6 @@ func CacheTTL(c *config.Config) time.Duration {
return CacheTTLDefault
}
-// ContainerCacheSize returns the value of "container_cache_size" config parameter
-// from "morph" section.
-//
-// Returns 0 if the value is not positive integer.
-// Returns ContainerCacheSizeDefault if the value is missing.
-func ContainerCacheSize(c *config.Config) uint32 {
- if c.Sub(subsection).Value("container_cache_size") == nil {
- return ContainerCacheSizeDefault
- }
- return config.Uint32Safe(c.Sub(subsection), "container_cache_size")
-}
-
// SwitchInterval returns the value of "switch_interval" config parameter
// from "morph" section.
//
@@ -133,41 +97,3 @@ func SwitchInterval(c *config.Config) time.Duration {
return SwitchIntervalDefault
}
-
-// APEChainCacheSize returns the value of "ape_chain_cache_size" config parameter
-// from "morph" section.
-//
-// Returns 0 if the value is not positive integer.
-// Returns APEChainCacheSizeDefault if the value is missing.
-func APEChainCacheSize(c *config.Config) uint32 {
- if c.Sub(subsection).Value("ape_chain_cache_size") == nil {
- return APEChainCacheSizeDefault
- }
- return config.Uint32Safe(c.Sub(subsection), "ape_chain_cache_size")
-}
-
-// FrostfsIDCacheSize returns the value of "frostfsid_cache_size" config parameter
-// from "morph" section.
-//
-// Returns 0 if the value is not positive integer.
-// Returns FrostfsIDCacheSizeDefault if the value is missing.
-func FrostfsIDCacheSize(c *config.Config) uint32 {
- if c.Sub(subsection).Value("frostfsid_cache_size") == nil {
- return FrostfsIDCacheSizeDefault
- }
- return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size")
-}
-
-// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter
-// from "morph" section.
-//
-// Returns PollCandidatesTimeoutDefault if the value is not positive duration.
-func NetmapCandidatesPollInterval(c *config.Config) time.Duration {
- v := config.DurationSafe(c.Sub(subsection).
- Sub("netmap").Sub("candidates"), "poll_interval")
- if v > 0 {
- return v
- }
-
- return PollCandidatesTimeoutDefault
-}
diff --git a/cmd/frostfs-node/config/morph/config_test.go b/cmd/frostfs-node/config/morph/config_test.go
index 5a021abc3..be61d8608 100644
--- a/cmd/frostfs-node/config/morph/config_test.go
+++ b/cmd/frostfs-node/config/morph/config_test.go
@@ -19,35 +19,22 @@ func TestMorphSection(t *testing.T) {
require.Equal(t, morphconfig.DialTimeoutDefault, morphconfig.DialTimeout(empty))
require.Equal(t, morphconfig.CacheTTLDefault, morphconfig.CacheTTL(empty))
require.Equal(t, morphconfig.SwitchIntervalDefault, morphconfig.SwitchInterval(empty))
- require.Equal(t, uint32(morphconfig.APEChainCacheSizeDefault), morphconfig.APEChainCacheSize(empty))
})
const path = "../../../../config/example/node"
- rpcs := []client.Endpoint{
- {
- Address: "wss://rpc1.morph.frostfs.info:40341/ws",
- Priority: 1,
- MTLSConfig: &client.MTLSConfig{
- TrustedCAList: []string{
- "/path/to/ca.pem",
- },
- KeyFile: "/path/to/key",
- CertFile: "/path/to/cert",
- },
- },
- {
- Address: "wss://rpc2.morph.frostfs.info:40341/ws",
- Priority: 2,
- },
- }
+ var (
+ rpcs = []client.Endpoint{
+ {"wss://rpc1.morph.frostfs.info:40341/ws", 1},
+ {"wss://rpc2.morph.frostfs.info:40341/ws", 2},
+ }
+ )
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
require.Equal(t, rpcs, morphconfig.RPCEndpoint(c))
require.Equal(t, 30*time.Second, morphconfig.DialTimeout(c))
require.Equal(t, 15*time.Second, morphconfig.CacheTTL(c))
require.Equal(t, 3*time.Minute, morphconfig.SwitchInterval(c))
- require.Equal(t, uint32(100000), morphconfig.APEChainCacheSize(c))
}
configtest.ForEachFileType(path, fileConfigTest)
diff --git a/cmd/frostfs-node/config/multinet/config.go b/cmd/frostfs-node/config/multinet/config.go
deleted file mode 100644
index f598efc51..000000000
--- a/cmd/frostfs-node/config/multinet/config.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package multinet
-
-import (
- "strconv"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-const (
- subsection = "multinet"
-
- FallbackDelayDefault = 300 * time.Millisecond
-)
-
-// Enabled returns the value of "enabled" config parameter from "multinet" section.
-func Enabled(c *config.Config) bool {
- return config.BoolSafe(c.Sub(subsection), "enabled")
-}
-
-type Subnet struct {
- Mask string
- SourceIPs []string
-}
-
-// Subnets returns the value of "subnets" config parameter from "multinet" section.
-func Subnets(c *config.Config) []Subnet {
- var result []Subnet
- sub := c.Sub(subsection).Sub("subnets")
- for i := 0; ; i++ {
- s := sub.Sub(strconv.FormatInt(int64(i), 10))
- mask := config.StringSafe(s, "mask")
- if mask == "" {
- break
- }
- sourceIPs := config.StringSliceSafe(s, "source_ips")
- result = append(result, Subnet{
- Mask: mask,
- SourceIPs: sourceIPs,
- })
- }
- return result
-}
-
-// Balancer returns the value of "balancer" config parameter from "multinet" section.
-func Balancer(c *config.Config) string {
- return config.StringSafe(c.Sub(subsection), "balancer")
-}
-
-// Restrict returns the value of "restrict" config parameter from "multinet" section.
-func Restrict(c *config.Config) bool {
- return config.BoolSafe(c.Sub(subsection), "restrict")
-}
-
-// FallbackDelay returns the value of "fallback_delay" config parameter from "multinet" section.
-func FallbackDelay(c *config.Config) time.Duration {
- fd := config.DurationSafe(c.Sub(subsection), "fallback_delay")
- if fd != 0 { // negative value means no fallback
- return fd
- }
- return FallbackDelayDefault
-}
diff --git a/cmd/frostfs-node/config/multinet/config_test.go b/cmd/frostfs-node/config/multinet/config_test.go
deleted file mode 100644
index 5f7dc6d53..000000000
--- a/cmd/frostfs-node/config/multinet/config_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package multinet
-
-import (
- "testing"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestMultinetSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- empty := configtest.EmptyConfig()
- require.Equal(t, false, Enabled(empty))
- require.Equal(t, ([]Subnet)(nil), Subnets(empty))
- require.Equal(t, "", Balancer(empty))
- require.Equal(t, false, Restrict(empty))
- require.Equal(t, FallbackDelayDefault, FallbackDelay(empty))
- })
-
- const path = "../../../../config/example/node"
-
- fileConfigTest := func(c *config.Config) {
- require.Equal(t, true, Enabled(c))
- require.Equal(t, []Subnet{
- {
- Mask: "192.168.219.174/24",
- SourceIPs: []string{
- "192.168.218.185",
- "192.168.219.185",
- },
- },
- {
- Mask: "10.78.70.74/24",
- SourceIPs: []string{
- "10.78.70.185",
- "10.78.71.185",
- },
- },
- }, Subnets(c))
- require.Equal(t, "roundrobin", Balancer(c))
- require.Equal(t, false, Restrict(c))
- require.Equal(t, 350*time.Millisecond, FallbackDelay(c))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
-}
diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go
index c50718c5f..9dfe8ddf4 100644
--- a/cmd/frostfs-node/config/node/config.go
+++ b/cmd/frostfs-node/config/node/config.go
@@ -2,11 +2,9 @@ package nodeconfig
import (
"fmt"
- "io/fs"
- "iter"
"os"
- "slices"
"strconv"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -26,22 +24,25 @@ type PersistentStateConfig struct {
cfg *config.Config
}
-// PersistentPolicyRulesConfig is a wrapper over "persistent_policy_rules" config section
-// which provides access to persistent policy rules storage configuration of node.
-type PersistentPolicyRulesConfig struct {
+// NotificationConfig is a wrapper over "notification" config section
+// which provides access to object notification configuration of node.
+type NotificationConfig struct {
cfg *config.Config
}
const (
- subsection = "node"
- persistentSessionsSubsection = "persistent_sessions"
- persistentStateSubsection = "persistent_state"
- persistentPolicyRulesSubsection = "persistent_policy_rules"
+ subsection = "node"
+ persistentSessionsSubsection = "persistent_sessions"
+ persistentStateSubsection = "persistent_state"
+ notificationSubsection = "notification"
attributePrefix = "attribute"
// PersistentStatePathDefault is a default path for persistent state file.
PersistentStatePathDefault = ".frostfs-storage-state"
+
+ // NotificationTimeoutDefault is a default timeout for object notification operation.
+ NotificationTimeoutDefault = 5 * time.Second
)
// Key returns the value of "key" config parameter
@@ -90,8 +91,12 @@ func Wallet(c *config.Config) *keys.PrivateKey {
type stringAddressGroup []string
-func (x stringAddressGroup) Addresses() iter.Seq[string] {
- return slices.Values(x)
+func (x stringAddressGroup) IterateAddresses(f func(string) bool) {
+ for i := range x {
+ if f(x[i]) {
+ break
+ }
+ }
}
func (x stringAddressGroup) NumberOfAddresses() int {
@@ -119,7 +124,7 @@ func BootstrapAddresses(c *config.Config) (addr network.AddressGroup) {
func Attributes(c *config.Config) (attrs []string) {
const maxAttributes = 100
- for i := range maxAttributes {
+ for i := 0; i < maxAttributes; i++ {
attr := config.StringSafe(c.Sub(subsection), attributePrefix+"_"+strconv.Itoa(i))
if attr == "" {
return
@@ -131,6 +136,14 @@ func Attributes(c *config.Config) (attrs []string) {
return
}
+// Relay returns the value of "relay" config parameter
+// from "node" section.
+//
+// Returns false if the value is not set.
+func Relay(c *config.Config) bool {
+ return config.BoolSafe(c.Sub(subsection), "relay")
+}
+
// PersistentSessions returns structure that provides access to "persistent_sessions"
// subsection of "node" section.
func PersistentSessions(c *config.Config) PersistentSessionsConfig {
@@ -164,51 +177,71 @@ func (p PersistentStateConfig) Path() string {
return PersistentStatePathDefault
}
-const (
- // PermDefault is a default permission bits for local override storage file.
- PermDefault = 0o644
-)
-
-// PersistentPolicyRules returns structure that provides access to "persistent_policy_rules"
+// Notification returns structure that provides access to "notification"
// subsection of "node" section.
-func PersistentPolicyRules(c *config.Config) PersistentPolicyRulesConfig {
- return PersistentPolicyRulesConfig{
- c.Sub(subsection).Sub(persistentPolicyRulesSubsection),
+func Notification(c *config.Config) NotificationConfig {
+ return NotificationConfig{
+ c.Sub(subsection).Sub(notificationSubsection),
}
}
-// Path returns the value of "path" config parameter.
+// Enabled returns the value of "enabled" config parameter from "notification"
+// subsection of "node" section.
//
-// Returns empty string if missing, for compatibility with older configurations.
-func (l PersistentPolicyRulesConfig) Path() string {
- return config.StringSafe(l.cfg, "path")
+// Returns false if the value is not presented.
+func (n NotificationConfig) Enabled() bool {
+ return config.BoolSafe(n.cfg, "enabled")
}
-// Perm returns the value of "perm" config parameter as a fs.FileMode.
+// DefaultTopic returns the value of "default_topic" config parameter from
+// "notification" subsection of "node" section.
//
-// Returns PermDefault if the value is not a positive number.
-func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
- p := config.UintSafe(l.cfg, "perm")
- if p == 0 {
- p = PermDefault
+// Returns empty string if the value is not presented.
+func (n NotificationConfig) DefaultTopic() string {
+ return config.StringSafe(n.cfg, "default_topic")
+}
+
+// Endpoint returns the value of "endpoint" config parameter from "notification"
+// subsection of "node" section.
+//
+// Returns empty string if the value is not presented.
+func (n NotificationConfig) Endpoint() string {
+ return config.StringSafe(n.cfg, "endpoint")
+}
+
+// Timeout returns the value of "timeout" config parameter from "notification"
+// subsection of "node" section.
+//
+// Returns NotificationTimeoutDefault if the value is not positive.
+func (n NotificationConfig) Timeout() time.Duration {
+ v := config.DurationSafe(n.cfg, "timeout")
+ if v > 0 {
+ return v
}
- return fs.FileMode(p)
+ return NotificationTimeoutDefault
}
-// NoSync returns the value of "no_sync" config parameter as a bool value.
+// CertPath returns the value of "certificate_path" config parameter from "notification"
+// subsection of "node" section.
//
-// Returns false if the value is not a boolean.
-func (l PersistentPolicyRulesConfig) NoSync() bool {
- return config.BoolSafe(l.cfg, "no_sync")
+// Returns empty string if the value is not presented.
+func (n NotificationConfig) CertPath() string {
+ return config.StringSafe(n.cfg, "certificate")
}
-// CompatibilityMode returns true if need to run node in compatibility with previous versions mode.
-func CompatibilityMode(c *config.Config) bool {
- return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode")
+// KeyPath returns the value of "key_path" config parameter from
+// "notification" subsection of "node" section.
+//
+// Returns empty string if the value is not presented.
+func (n NotificationConfig) KeyPath() string {
+ return config.StringSafe(n.cfg, "key")
}
-// LocodeDBPath returns path to LOCODE database.
-func LocodeDBPath(c *config.Config) string {
- return config.String(c.Sub(subsection), "locode_db_path")
+// CAPath returns the value of "ca_path" config parameter from
+// "notification" subsection of "node" section.
+//
+// Returns empty string if the value is not presented.
+func (n NotificationConfig) CAPath() string {
+ return config.StringSafe(n.cfg, "ca")
}
diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go
index 9af1dc038..3a1120491 100644
--- a/cmd/frostfs-node/config/node/config_test.go
+++ b/cmd/frostfs-node/config/node/config_test.go
@@ -2,6 +2,7 @@ package nodeconfig
import (
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
@@ -29,23 +30,47 @@ func TestNodeSection(t *testing.T) {
)
attribute := Attributes(empty)
+ relay := Relay(empty)
persisessionsPath := PersistentSessions(empty).Path()
persistatePath := PersistentState(empty).Path()
+ notificationDefaultEnabled := Notification(empty).Enabled()
+ notificationDefaultEndpoint := Notification(empty).Endpoint()
+ notificationDefaultTimeout := Notification(empty).Timeout()
+ notificationDefaultTopic := Notification(empty).DefaultTopic()
+ notificationDefaultCertPath := Notification(empty).CertPath()
+ notificationDefaultKeyPath := Notification(empty).KeyPath()
+ notificationDefaultCAPath := Notification(empty).CAPath()
require.Empty(t, attribute)
+ require.Equal(t, false, relay)
require.Equal(t, "", persisessionsPath)
require.Equal(t, PersistentStatePathDefault, persistatePath)
+ require.Equal(t, false, notificationDefaultEnabled)
+ require.Equal(t, "", notificationDefaultEndpoint)
+ require.Equal(t, NotificationTimeoutDefault, notificationDefaultTimeout)
+ require.Equal(t, "", notificationDefaultTopic)
+ require.Equal(t, "", notificationDefaultCertPath)
+ require.Equal(t, "", notificationDefaultKeyPath)
+ require.Equal(t, "", notificationDefaultCAPath)
})
const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
key := Key(c)
addrs := BootstrapAddresses(c)
attributes := Attributes(c)
+ relay := Relay(c)
wKey := Wallet(c)
persisessionsPath := PersistentSessions(c).Path()
persistatePath := PersistentState(c).Path()
+ notificationEnabled := Notification(c).Enabled()
+ notificationEndpoint := Notification(c).Endpoint()
+ notificationTimeout := Notification(c).Timeout()
+ notificationDefaultTopic := Notification(c).DefaultTopic()
+ notificationCertPath := Notification(c).CertPath()
+ notificationKeyPath := Notification(c).KeyPath()
+ notificationCAPath := Notification(c).CAPath()
expectedAddr := []struct {
str string
@@ -84,6 +109,8 @@ func TestNodeSection(t *testing.T) {
return false
})
+ require.Equal(t, true, relay)
+
require.Len(t, attributes, 2)
require.Equal(t, "Price:11", attributes[0])
require.Equal(t, "UN-LOCODE:RU MSK", attributes[1])
@@ -95,6 +122,13 @@ func TestNodeSection(t *testing.T) {
require.Equal(t, "/sessions", persisessionsPath)
require.Equal(t, "/state", persistatePath)
+ require.Equal(t, true, notificationEnabled)
+ require.Equal(t, "tls://localhost:4222", notificationEndpoint)
+ require.Equal(t, 6*time.Second, notificationTimeout)
+ require.Equal(t, "topic", notificationDefaultTopic)
+ require.Equal(t, "/cert/path", notificationCertPath)
+ require.Equal(t, "/key/path", notificationKeyPath)
+ require.Equal(t, "/ca/path", notificationCAPath)
}
configtest.ForEachFileType(path, fileConfigTest)
diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go
index c8c967d30..f7a33b5e0 100644
--- a/cmd/frostfs-node/config/object/config.go
+++ b/cmd/frostfs-node/config/object/config.go
@@ -10,17 +10,14 @@ type PutConfig struct {
cfg *config.Config
}
-// GetConfig is a wrapper over "get" config section which provides access
-// to object get pipeline configuration of object service.
-type GetConfig struct {
- cfg *config.Config
-}
-
const (
subsection = "object"
putSubsection = "put"
- getSubsection = "get"
+
+ // PutPoolSizeDefault is a default value of routine pool size to
+ // process object.Put requests in object service.
+ PutPoolSizeDefault = 10
)
// Put returns structure that provides access to "put" subsection of
@@ -31,20 +28,31 @@ func Put(c *config.Config) PutConfig {
}
}
+// PoolSizeRemote returns the value of "pool_size_remote" config parameter.
+//
+// Returns PutPoolSizeDefault if the value is not a positive number.
+func (g PutConfig) PoolSizeRemote() int {
+ v := config.Int(g.cfg, "pool_size_remote")
+ if v > 0 {
+ return int(v)
+ }
+
+ return PutPoolSizeDefault
+}
+
+// PoolSizeLocal returns the value of "pool_size_local" config parameter.
+//
+// Returns PutPoolSizeDefault if the value is not a positive number.
+func (g PutConfig) PoolSizeLocal() int {
+ v := config.Int(g.cfg, "pool_size_local")
+ if v > 0 {
+ return int(v)
+ }
+
+ return PutPoolSizeDefault
+}
+
// SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false“ if is not defined.
func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")
}
-
-// Get returns structure that provides access to "get" subsection of
-// "object" section.
-func Get(c *config.Config) GetConfig {
- return GetConfig{
- c.Sub(subsection).Sub(getSubsection),
- }
-}
-
-// Priority returns the value of "priority" config parameter.
-func (g GetConfig) Priority() []string {
- return config.StringSliceSafe(g.cfg, "priority")
-}
diff --git a/cmd/frostfs-node/config/object/config_test.go b/cmd/frostfs-node/config/object/config_test.go
index 1c525ef55..513b6e9c5 100644
--- a/cmd/frostfs-node/config/object/config_test.go
+++ b/cmd/frostfs-node/config/object/config_test.go
@@ -13,13 +13,17 @@ func TestObjectSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
+ require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
+ require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
})
const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
+ require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
+ require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
}
diff --git a/cmd/frostfs-node/config/policer/config_test.go b/cmd/frostfs-node/config/policer/config_test.go
index 95f0c3af2..a4c23eae7 100644
--- a/cmd/frostfs-node/config/policer/config_test.go
+++ b/cmd/frostfs-node/config/policer/config_test.go
@@ -19,7 +19,7 @@ func TestPolicerSection(t *testing.T) {
const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
require.Equal(t, 15*time.Second, policerconfig.HeadTimeout(c))
}
diff --git a/cmd/frostfs-node/config/profiler/config.go b/cmd/frostfs-node/config/profiler/config.go
index 6c3e8adab..191694970 100644
--- a/cmd/frostfs-node/config/profiler/config.go
+++ b/cmd/frostfs-node/config/profiler/config.go
@@ -52,7 +52,7 @@ func Address(c *config.Config) string {
return AddressDefault
}
-// BlockRate returns the value of "block_rate" config parameter
+// BlockRates returns the value of "block_rate" config parameter
// from "pprof" section.
func BlockRate(c *config.Config) int {
s := c.Sub(subsection)
diff --git a/cmd/frostfs-node/config/profiler/config_test.go b/cmd/frostfs-node/config/profiler/config_test.go
index 2f1cb1788..355874387 100644
--- a/cmd/frostfs-node/config/profiler/config_test.go
+++ b/cmd/frostfs-node/config/profiler/config_test.go
@@ -25,7 +25,7 @@ func TestProfilerSection(t *testing.T) {
const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
to := profilerconfig.ShutdownTimeout(c)
addr := profilerconfig.Address(c)
diff --git a/cmd/frostfs-node/config/qos/config.go b/cmd/frostfs-node/config/qos/config.go
deleted file mode 100644
index 85f8180ed..000000000
--- a/cmd/frostfs-node/config/qos/config.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package qos
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-const (
- subsection = "qos"
- criticalSubSection = "critical"
- internalSubSection = "internal"
-)
-
-// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config
-// parameter from "qos" section.
-//
-// Returns an empty list if not set.
-func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys {
- return authorizedKeys(c, criticalSubSection)
-}
-
-// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config
-// parameter from "qos" section.
-//
-// Returns an empty list if not set.
-func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys {
- return authorizedKeys(c, internalSubSection)
-}
-
-func authorizedKeys(c *config.Config, sub string) keys.PublicKeys {
- strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys")
- pubs := make(keys.PublicKeys, 0, len(strKeys))
-
- for i := range strKeys {
- pub, err := keys.NewPublicKeyFromString(strKeys[i])
- if err != nil {
- panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err))
- }
-
- pubs = append(pubs, pub)
- }
-
- return pubs
-}
diff --git a/cmd/frostfs-node/config/qos/config_test.go b/cmd/frostfs-node/config/qos/config_test.go
deleted file mode 100644
index b3b6019cc..000000000
--- a/cmd/frostfs-node/config/qos/config_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package qos
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-func TestQoSSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- empty := configtest.EmptyConfig()
-
- require.Empty(t, CriticalAuthorizedKeys(empty))
- require.Empty(t, InternalAuthorizedKeys(empty))
- })
-
- const path = "../../../../config/example/node"
-
- criticalPubs := make(keys.PublicKeys, 2)
- criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
- criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
-
- internalPubs := make(keys.PublicKeys, 2)
- internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2")
- internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a")
-
- fileConfigTest := func(c *config.Config) {
- require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c))
- require.Equal(t, internalPubs, InternalAuthorizedKeys(c))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
-}
diff --git a/cmd/frostfs-node/config/replicator/config.go b/cmd/frostfs-node/config/replicator/config.go
index e954bf19d..0fbac935c 100644
--- a/cmd/frostfs-node/config/replicator/config.go
+++ b/cmd/frostfs-node/config/replicator/config.go
@@ -11,8 +11,6 @@ const (
// PutTimeoutDefault is a default timeout of object put request in replicator.
PutTimeoutDefault = 5 * time.Second
- // PoolSizeDefault is a default pool size for put request in replicator.
- PoolSizeDefault = 10
)
// PutTimeout returns the value of "put_timeout" config parameter
@@ -30,13 +28,6 @@ func PutTimeout(c *config.Config) time.Duration {
// PoolSize returns the value of "pool_size" config parameter
// from "replicator" section.
-//
-// Returns PoolSizeDefault if the value is non-positive integer.
func PoolSize(c *config.Config) int {
- v := int(config.IntSafe(c.Sub(subsection), "pool_size"))
- if v > 0 {
- return v
- }
-
- return PoolSizeDefault
+ return int(config.IntSafe(c.Sub(subsection), "pool_size"))
}
diff --git a/cmd/frostfs-node/config/replicator/config_test.go b/cmd/frostfs-node/config/replicator/config_test.go
index 2aa490946..0118aa0b2 100644
--- a/cmd/frostfs-node/config/replicator/config_test.go
+++ b/cmd/frostfs-node/config/replicator/config_test.go
@@ -15,12 +15,12 @@ func TestReplicatorSection(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty))
- require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty))
+ require.Equal(t, 0, replicatorconfig.PoolSize(empty))
})
const path = "../../../../config/example/node"
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
require.Equal(t, 15*time.Second, replicatorconfig.PutTimeout(c))
require.Equal(t, 10, replicatorconfig.PoolSize(c))
}
diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go
deleted file mode 100644
index e0efdfde2..000000000
--- a/cmd/frostfs-node/config/rpc/config.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package rpcconfig
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-const (
- subsection = "rpc"
- limitsSubsection = "limits"
-)
-
-type LimitConfig struct {
- Methods []string
- MaxOps int64
-}
-
-// Limits returns the "limits" config from "rpc" section.
-func Limits(c *config.Config) []LimitConfig {
- c = c.Sub(subsection).Sub(limitsSubsection)
-
- var limits []LimitConfig
-
- for i := uint64(0); ; i++ {
- si := strconv.FormatUint(i, 10)
- sc := c.Sub(si)
-
- methods := config.StringSliceSafe(sc, "methods")
- if len(methods) == 0 {
- break
- }
-
- if sc.Value("max_ops") == nil {
- panic("no max operations for method group")
- }
-
- limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")})
- }
-
- return limits
-}
diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go
deleted file mode 100644
index a6365e19f..000000000
--- a/cmd/frostfs-node/config/rpc/config_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package rpcconfig
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestRPCSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- require.Empty(t, Limits(configtest.EmptyConfig()))
- })
-
- t.Run("correct config", func(t *testing.T) {
- const path = "../../../../config/example/node"
-
- fileConfigTest := func(c *config.Config) {
- limits := Limits(c)
- require.Len(t, limits, 2)
-
- limit0 := limits[0]
- limit1 := limits[1]
-
- require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
- require.Equal(t, limit0.MaxOps, int64(1000))
-
- require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
- require.Equal(t, limit1.MaxOps, int64(10000))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
- })
-
- t.Run("no max operations", func(t *testing.T) {
- const path = "testdata/no_max_ops"
-
- fileConfigTest := func(c *config.Config) {
- require.Panics(t, func() { _ = Limits(c) })
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
- })
-
- t.Run("zero max operations", func(t *testing.T) {
- const path = "testdata/zero_max_ops"
-
- fileConfigTest := func(c *config.Config) {
- limits := Limits(c)
- require.Len(t, limits, 2)
-
- limit0 := limits[0]
- limit1 := limits[1]
-
- require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
- require.Equal(t, limit0.MaxOps, int64(0))
-
- require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
- require.Equal(t, limit1.MaxOps, int64(10000))
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
- })
-}
diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env
deleted file mode 100644
index 2fed4c5bc..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env
+++ /dev/null
@@ -1,3 +0,0 @@
-FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
-FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
-FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json
deleted file mode 100644
index 6156aa71d..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "rpc": {
- "limits": [
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/PutSingle",
- "/neo.fs.v2.object.ObjectService/Put"
- ]
- },
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/Get"
- ],
- "max_ops": 10000
- }
- ]
- }
-}
diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml
deleted file mode 100644
index e50b7ae93..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-rpc:
- limits:
- - methods:
- - /neo.fs.v2.object.ObjectService/PutSingle
- - /neo.fs.v2.object.ObjectService/Put
- - methods:
- - /neo.fs.v2.object.ObjectService/Get
- max_ops: 10000
diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env
deleted file mode 100644
index ce7302b0b..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env
+++ /dev/null
@@ -1,4 +0,0 @@
-FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
-FROSTFS_RPC_LIMITS_0_MAX_OPS=0
-FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
-FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json
deleted file mode 100644
index 16a1c173f..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "rpc": {
- "limits": [
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/PutSingle",
- "/neo.fs.v2.object.ObjectService/Put"
- ],
- "max_ops": 0
- },
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/Get"
- ],
- "max_ops": 10000
- }
- ]
- }
-}
diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml
deleted file mode 100644
index 525d768d4..000000000
--- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-rpc:
- limits:
- - methods:
- - /neo.fs.v2.object.ObjectService/PutSingle
- - /neo.fs.v2.object.ObjectService/Put
- max_ops: 0
- - methods:
- - /neo.fs.v2.object.ObjectService/Get
- max_ops: 10000
diff --git a/cmd/frostfs-node/config/test/config.go b/cmd/frostfs-node/config/test/config.go
index e98c032f0..28ec65291 100644
--- a/cmd/frostfs-node/config/test/config.go
+++ b/cmd/frostfs-node/config/test/config.go
@@ -11,6 +11,8 @@ import (
)
func fromFile(path string) *config.Config {
+ os.Clearenv() // ENVs have priority over config files, so we do this in tests
+
return config.New(path, "", "")
}
@@ -62,6 +64,7 @@ func loadEnv(t testing.TB, path string) {
v = strings.Trim(v, `"`)
- t.Setenv(k, v)
+ err = os.Setenv(k, v)
+ require.NoError(t, err, "can't set environment variable")
}
}
diff --git a/cmd/frostfs-node/config/tracing/config.go b/cmd/frostfs-node/config/tracing/config.go
index 91ef669ee..e846be158 100644
--- a/cmd/frostfs-node/config/tracing/config.go
+++ b/cmd/frostfs-node/config/tracing/config.go
@@ -1,12 +1,6 @@
package tracing
import (
- "crypto/x509"
- "errors"
- "fmt"
- "os"
- "strconv"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -17,53 +11,19 @@ const (
)
// ToTracingConfig extracts tracing config.
-func ToTracingConfig(c *config.Config) (*tracing.Config, error) {
- conf := &tracing.Config{
+func ToTracingConfig(c *config.Config) *tracing.Config {
+ return &tracing.Config{
Enabled: config.BoolSafe(c.Sub(subsection), "enabled"),
Exporter: tracing.Exporter(config.StringSafe(c.Sub(subsection), "exporter")),
Endpoint: config.StringSafe(c.Sub(subsection), "endpoint"),
Service: "frostfs-node",
InstanceID: getInstanceIDOrDefault(c),
Version: misc.Version,
- Attributes: make(map[string]string),
}
-
- if trustedCa := config.StringSafe(c.Sub(subsection), "trusted_ca"); trustedCa != "" {
- caBytes, err := os.ReadFile(trustedCa)
- if err != nil {
- return nil, fmt.Errorf("cannot read trusted ca cert by path: %w", err)
- }
- certPool := x509.NewCertPool()
- ok := certPool.AppendCertsFromPEM(caBytes)
- if !ok {
- return nil, errors.New("can't fill cert pool by ca cert")
- }
- conf.ServerCaCertPool = certPool
- }
-
- i := uint64(0)
- for ; ; i++ {
- si := strconv.FormatUint(i, 10)
- ac := c.Sub(subsection).Sub("attributes").Sub(si)
- k := config.StringSafe(ac, "key")
- if k == "" {
- break
- }
- v := config.StringSafe(ac, "value")
- if v == "" {
- return nil, fmt.Errorf("empty tracing attribute value for key %s", k)
- }
- if _, ok := conf.Attributes[k]; ok {
- return nil, fmt.Errorf("tracing attribute key %s defined more than once", k)
- }
- conf.Attributes[k] = v
- }
-
- return conf, nil
}
func getInstanceIDOrDefault(c *config.Config) string {
- s := config.StringSliceSafe(c.Sub("node"), "addresses")
+ s := config.StringSlice(c.Sub("node"), "addresses")
if len(s) > 0 {
return s[0]
}
diff --git a/cmd/frostfs-node/config/tracing/config_test.go b/cmd/frostfs-node/config/tracing/config_test.go
deleted file mode 100644
index 8e485ca6e..000000000
--- a/cmd/frostfs-node/config/tracing/config_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package tracing
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "github.com/stretchr/testify/require"
-)
-
-func TestTracingSection(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- tc, err := ToTracingConfig(configtest.EmptyConfig())
- require.NoError(t, err)
- require.Equal(t, false, tc.Enabled)
- require.Equal(t, tracing.Exporter(""), tc.Exporter)
- require.Equal(t, "", tc.Endpoint)
- require.Equal(t, "frostfs-node", tc.Service)
- require.Equal(t, "", tc.InstanceID)
- require.Nil(t, tc.ServerCaCertPool)
- require.Empty(t, tc.Attributes)
- })
-
- const path = "../../../../config/example/node"
-
- fileConfigTest := func(c *config.Config) {
- tc, err := ToTracingConfig(c)
- require.NoError(t, err)
- require.Equal(t, true, tc.Enabled)
- require.Equal(t, tracing.OTLPgRPCExporter, tc.Exporter)
- require.Equal(t, "localhost", tc.Endpoint)
- require.Equal(t, "frostfs-node", tc.Service)
- require.Nil(t, tc.ServerCaCertPool)
- require.EqualValues(t, map[string]string{
- "key0": "value",
- "key1": "value",
- }, tc.Attributes)
- }
-
- configtest.ForEachFileType(path, fileConfigTest)
-
- t.Run("ENV", func(t *testing.T) {
- configtest.ForEnvFileType(t, path, fileConfigTest)
- })
-}
diff --git a/cmd/frostfs-node/config/tree/config.go b/cmd/frostfs-node/config/tree/config.go
index da877791e..8a8919999 100644
--- a/cmd/frostfs-node/config/tree/config.go
+++ b/cmd/frostfs-node/config/tree/config.go
@@ -10,8 +10,6 @@ import (
const (
subsection = "tree"
-
- SyncBatchSizeDefault = 1000
)
// TreeConfig is a wrapper over "tree" config section
@@ -76,17 +74,6 @@ func (c TreeConfig) SyncInterval() time.Duration {
return config.DurationSafe(c.cfg, "sync_interval")
}
-// SyncBatchSize returns the value of "sync_batch_size"
-// config parameter from the "tree" section.
-//
-// Returns `SyncBatchSizeDefault` if config value is not specified.
-func (c TreeConfig) SyncBatchSize() int {
- if v := config.IntSafe(c.cfg, "sync_batch_size"); v > 0 {
- return int(v)
- }
- return SyncBatchSizeDefault
-}
-
// AuthorizedKeys parses and returns an array of "authorized_keys" config
// parameter from "tree" section.
//
diff --git a/cmd/frostfs-node/config/tree/config_test.go b/cmd/frostfs-node/config/tree/config_test.go
index 6628b8878..898f7e715 100644
--- a/cmd/frostfs-node/config/tree/config_test.go
+++ b/cmd/frostfs-node/config/tree/config_test.go
@@ -35,7 +35,7 @@ func TestTreeSection(t *testing.T) {
require.NoError(t, err)
expectedKeys = append(expectedKeys, key)
- fileConfigTest := func(c *config.Config) {
+ var fileConfigTest = func(c *config.Config) {
treeSec := treeconfig.Tree(c)
require.True(t, treeSec.Enabled())
@@ -44,7 +44,6 @@ func TestTreeSection(t *testing.T) {
require.Equal(t, 32, treeSec.ReplicationWorkerCount())
require.Equal(t, 5*time.Second, treeSec.ReplicationTimeout())
require.Equal(t, time.Hour, treeSec.SyncInterval())
- require.Equal(t, 2000, treeSec.SyncBatchSize())
require.Equal(t, expectedKeys, treeSec.AuthorizedKeys())
}
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index bdb280d87..d54bf13dd 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -3,80 +3,106 @@ package main
import (
"bytes"
"context"
- "net"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "strconv"
- containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
- morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+ containerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
containerTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/container/grpc"
containerService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
+ loadroute "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/route"
+ placementrouter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/route/placement"
+ loadstorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/storage"
containerMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph"
- containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ apiClient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"go.uber.org/zap"
- "google.golang.org/grpc"
)
-func initContainerService(_ context.Context, c *cfg) {
+const (
+ startEstimationNotifyEvent = "StartEstimation"
+ stopEstimationNotifyEvent = "StopEstimation"
+)
+
+func initContainerService(ctx context.Context, c *cfg) {
// container wrapper that tries to invoke notary
// requests if chain is configured so
- wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
+ wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
fatalOnErr(err)
- c.cnrClient = wrap
+ c.shared.cnrClient = wrap
cnrSrc := cntClient.AsContainerSource(wrap)
cnrRdr, cnrWrt := configureEACLAndContainerSources(c, wrap, cnrSrc)
- var frostfsIDSubjectProvider frostfsidcore.SubjectProvider
- frostfsIDSubjectProvider, err = frostfsid.NewFromMorph(c.cfgMorph.client, c.cfgFrostfsID.scriptHash, 0)
- fatalOnErr(err)
+ loadAccumulator := loadstorage.New(loadstorage.Prm{})
- cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
- if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
- frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
+ loadPlacementBuilder := &loadPlacementBuilder{
+ log: c.log,
+ nmSrc: c.netMapSource,
+ cnrSrc: cnrSrc,
}
- c.frostfsidClient = frostfsIDSubjectProvider
- c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
-
- defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
- c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
- c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
- )
- service := containerService.NewSignService(
- &c.key.PrivateKey,
- containerService.NewAPEServer(defaultChainRouter, cnrRdr,
- newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient,
- containerService.NewSplitterService(
- c.cfgContainer.containerBatchSize, c.respSvc,
- containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
- ),
- )
- service = containerService.NewAuditService(service, c.log, c.audit)
- server := containerTransportGRPC.New(service)
-
- c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
- containerGRPC.RegisterContainerServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(containerGRPC.ContainerService_ServiceDesc), server)
+ routeBuilder := placementrouter.New(placementrouter.Prm{
+ PlacementBuilder: loadPlacementBuilder,
})
- c.cfgObject.cfgLocalStorage.localStorage.SetContainerSource(cnrRdr)
+ loadRouter := loadroute.New(
+ loadroute.Prm{
+ LocalServerInfo: c,
+ RemoteWriterProvider: &remoteLoadAnnounceProvider{
+ key: &c.key.PrivateKey,
+ netmapKeys: c,
+ clientCache: c.bgClientCache,
+ deadEndProvider: loadcontroller.SimpleWriterProvider(loadAccumulator),
+ },
+ Builder: routeBuilder,
+ },
+ loadroute.WithLogger(c.log),
+ )
+
+ setLoadController(ctx, c, loadRouter, loadAccumulator)
+
+ server := containerTransportGRPC.New(
+ containerService.NewSignService(
+ &c.key.PrivateKey,
+ &usedSpaceService{
+ Server: containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
+ loadWriterProvider: loadRouter,
+ loadPlacementBuilder: loadPlacementBuilder,
+ routeBuilder: routeBuilder,
+ cfg: c,
+ },
+ ),
+ )
+
+ for _, srv := range c.cfgGRPC.servers {
+ containerGRPC.RegisterContainerServiceServer(srv, server)
+ }
}
func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc containerCore.Source) (*morphContainerReader, *morphContainerWriter) {
+ eACLFetcher := &morphEACLFetcher{
+ w: client,
+ }
+
cnrRdr := new(morphContainerReader)
cnrWrt := &morphContainerWriter{
@@ -84,56 +110,118 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
}
if c.cfgMorph.cacheTTL <= 0 {
+ c.cfgObject.eaclSource = eACLFetcher
+ cnrRdr.eacl = eACLFetcher
c.cfgObject.cnrSource = cnrSrc
cnrRdr.src = cnrSrc
cnrRdr.lister = client
} else {
// use RPC node as source of Container contract items (with caching)
- c.cfgObject.cnrSource = cnrSrc
- if c.cfgMorph.containerCacheSize > 0 {
- containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize)
+ cachedContainerStorage := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL)
+ cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL)
+ cachedContainerLister := newCachedContainerLister(client, c.cfgMorph.cacheTTL)
- subscribeToContainerCreation(c, func(ctx context.Context, e event.Event) {
- ev := e.(containerEvent.PutSuccess)
+ subscribeToContainerCreation(c, func(e event.Event) {
+ ev := e.(containerEvent.PutSuccess)
- // read owner of the created container in order to update the reading cache.
- // TODO: use owner directly from the event after neofs-contract#256 will become resolved
- // but don't forget about the profit of reading the new container and caching it:
- // creation success are most commonly tracked by polling GET op.
- cnr, err := cnrSrc.Get(ctx, ev.ID)
- if err == nil {
- containerCache.containerCache.set(ev.ID, cnr, nil)
- } else {
- // unlike removal, we expect successful receive of the container
- // after successful creation, so logging can be useful
- c.log.Error(ctx, logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
- zap.Stringer("id", ev.ID),
- zap.Error(err),
- )
- }
-
- c.log.Debug(ctx, logs.FrostFSNodeContainerCreationEventsReceipt,
+ // read owner of the created container in order to update the reading cache.
+ // TODO: use owner directly from the event after neofs-contract#256 will become resolved
+ // but don't forget about the profit of reading the new container and caching it:
+ // creation success are most commonly tracked by polling GET op.
+ cnr, err := cnrSrc.Get(ev.ID)
+ if err == nil {
+ cachedContainerLister.update(cnr.Value.Owner(), ev.ID, true)
+ cachedContainerStorage.containerCache.set(ev.ID, cnr, nil)
+ } else {
+ // unlike removal, we expect successful receive of the container
+ // after successful creation, so logging can be useful
+ c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
zap.Stringer("id", ev.ID),
+ zap.Error(err),
)
- })
+ }
- subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
- ev := e.(containerEvent.DeleteSuccess)
- containerCache.handleRemoval(ev.ID)
- c.log.Debug(ctx, logs.FrostFSNodeContainerRemovalEventsReceipt,
- zap.Stringer("id", ev.ID),
- )
- })
- c.cfgObject.cnrSource = containerCache
- }
+ c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
+ zap.Stringer("id", ev.ID),
+ )
+ })
- cnrRdr.lister = client
+ subscribeToContainerRemoval(c, func(e event.Event) {
+ ev := e.(containerEvent.DeleteSuccess)
+
+ // read owner of the removed container in order to update the listing cache.
+ // It's strange to read already removed container, but we can successfully hit
+ // the cache.
+ // TODO: use owner directly from the event after neofs-contract#256 will become resolved
+ cnr, err := cachedContainerStorage.Get(ev.ID)
+ if err == nil {
+ cachedContainerLister.update(cnr.Value.Owner(), ev.ID, false)
+ }
+
+ cachedContainerStorage.handleRemoval(ev.ID)
+ c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
+ zap.Stringer("id", ev.ID),
+ )
+ })
+
+ c.cfgObject.eaclSource = cachedEACLStorage
+ c.cfgObject.cnrSource = cachedContainerStorage
+
+ cnrRdr.lister = cachedContainerLister
+ cnrRdr.eacl = c.cfgObject.eaclSource
cnrRdr.src = c.cfgObject.cnrSource
+
+ cnrWrt.cacheEnabled = true
+ cnrWrt.eacls = cachedEACLStorage
}
return cnrRdr, cnrWrt
}
+func setLoadController(ctx context.Context, c *cfg, loadRouter *loadroute.Router, loadAccumulator *loadstorage.Storage) {
+ pubKey := c.key.PublicKey().Bytes()
+
+ // container wrapper that always sends non-notary
+ // requests
+ wrapperNoNotary, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
+ fatalOnErr(err)
+
+ resultWriter := &morphLoadWriter{
+ log: c.log,
+ cnrMorphClient: wrapperNoNotary,
+ key: pubKey,
+ }
+
+ localMetrics := &localStorageLoad{
+ log: c.log,
+ engine: c.cfgObject.cfgLocalStorage.localStorage,
+ }
+
+ ctrl := loadcontroller.New(
+ loadcontroller.Prm{
+ LocalMetrics: loadcontroller.SimpleIteratorProvider(localMetrics),
+ AnnouncementAccumulator: loadcontroller.SimpleIteratorProvider(loadAccumulator),
+ LocalAnnouncementTarget: loadRouter,
+ ResultReceiver: loadcontroller.SimpleWriterProvider(resultWriter),
+ },
+ loadcontroller.WithLogger(c.log),
+ )
+
+ setContainerNotificationParser(c, startEstimationNotifyEvent, containerEvent.ParseStartEstimation)
+ addContainerAsyncNotificationHandler(c, startEstimationNotifyEvent, func(ev event.Event) {
+ ctrl.Start(ctx, loadcontroller.StartPrm{
+ Epoch: ev.(containerEvent.StartEstimation).Epoch(),
+ })
+ })
+
+ setContainerNotificationParser(c, stopEstimationNotifyEvent, containerEvent.ParseStopEstimation)
+ addContainerAsyncNotificationHandler(c, stopEstimationNotifyEvent, func(ev event.Event) {
+ ctrl.Stop(ctx, loadcontroller.StopPrm{
+ Epoch: ev.(containerEvent.StopEstimation).Epoch(),
+ })
+ })
+}
+
// addContainerNotificationHandler adds handler that will be executed synchronously.
func addContainerNotificationHandler(c *cfg, sTyp string, h event.Handler) {
typ := event.TypeFromString(sTyp)
@@ -196,6 +284,219 @@ func setContainerNotificationParser(c *cfg, sTyp string, p event.NotificationPar
c.cfgContainer.parsers[typ] = p
}
+type morphLoadWriter struct {
+ log *logger.Logger
+
+ cnrMorphClient *cntClient.Client
+
+ key []byte
+}
+
+func (w *morphLoadWriter) Put(a containerSDK.SizeEstimation) error {
+ w.log.Debug(logs.FrostFSNodeSaveUsedSpaceAnnouncementInContract,
+ zap.Uint64("epoch", a.Epoch()),
+ zap.Stringer("cid", a.Container()),
+ zap.Uint64("size", a.Value()),
+ )
+
+ prm := cntClient.AnnounceLoadPrm{}
+
+ prm.SetAnnouncement(a)
+ prm.SetReporter(w.key)
+
+ return w.cnrMorphClient.AnnounceLoad(prm)
+}
+
+func (*morphLoadWriter) Close(context.Context) error {
+ return nil
+}
+
+type nopLoadWriter struct{}
+
+func (nopLoadWriter) Put(containerSDK.SizeEstimation) error {
+ return nil
+}
+
+func (nopLoadWriter) Close(context.Context) error {
+ return nil
+}
+
+type remoteLoadAnnounceProvider struct {
+ key *ecdsa.PrivateKey
+
+ netmapKeys netmapCore.AnnouncedKeys
+
+ clientCache interface {
+ Get(client.NodeInfo) (client.MultiAddressClient, error)
+ }
+
+ deadEndProvider loadcontroller.WriterProvider
+}
+
+func (r *remoteLoadAnnounceProvider) InitRemote(srv loadcontroller.ServerInfo) (loadcontroller.WriterProvider, error) {
+ if srv == nil {
+ return r.deadEndProvider, nil
+ }
+
+ if r.netmapKeys.IsLocalKey(srv.PublicKey()) {
+ // if local => return no-op writer
+ return loadcontroller.SimpleWriterProvider(new(nopLoadWriter)), nil
+ }
+
+ var info client.NodeInfo
+
+ err := client.NodeInfoFromRawNetmapElement(&info, srv)
+ if err != nil {
+ return nil, fmt.Errorf("parse client node info: %w", err)
+ }
+
+ c, err := r.clientCache.Get(info)
+ if err != nil {
+ return nil, fmt.Errorf("could not initialize API client: %w", err)
+ }
+
+ return &remoteLoadAnnounceWriterProvider{
+ client: c,
+ }, nil
+}
+
+type remoteLoadAnnounceWriterProvider struct {
+ client client.Client
+}
+
+func (p *remoteLoadAnnounceWriterProvider) InitWriter([]loadcontroller.ServerInfo) (loadcontroller.Writer, error) {
+ return &remoteLoadAnnounceWriter{
+ client: p.client,
+ }, nil
+}
+
+type remoteLoadAnnounceWriter struct {
+ client client.Client
+
+ buf []containerSDK.SizeEstimation
+}
+
+func (r *remoteLoadAnnounceWriter) Put(a containerSDK.SizeEstimation) error {
+ r.buf = append(r.buf, a)
+
+ return nil
+}
+
+func (r *remoteLoadAnnounceWriter) Close(ctx context.Context) error {
+ cliPrm := apiClient.PrmAnnounceSpace{
+ Announcements: r.buf,
+ }
+
+ _, err := r.client.ContainerAnnounceUsedSpace(ctx, cliPrm)
+ return err
+}
+
+type loadPlacementBuilder struct {
+ log *logger.Logger
+
+ nmSrc netmapCore.Source
+
+ cnrSrc containerCore.Source
+}
+
+func (l *loadPlacementBuilder) BuildPlacement(epoch uint64, cnr cid.ID) ([][]netmap.NodeInfo, error) {
+ cnrNodes, nm, err := l.buildPlacement(epoch, cnr)
+ if err != nil {
+ return nil, err
+ }
+
+ const pivotPrefix = "load_announcement_"
+
+ pivot := []byte(
+ pivotPrefix + strconv.FormatUint(epoch, 10),
+ )
+
+ placement, err := nm.PlacementVectors(cnrNodes, pivot)
+ if err != nil {
+ return nil, fmt.Errorf("could not build placement vectors: %w", err)
+ }
+
+ return placement, nil
+}
+
+func (l *loadPlacementBuilder) buildPlacement(epoch uint64, idCnr cid.ID) ([][]netmap.NodeInfo, *netmap.NetMap, error) {
+ cnr, err := l.cnrSrc.Get(idCnr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ nm, err := l.nmSrc.GetNetMapByEpoch(epoch)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not get network map: %w", err)
+ }
+
+ binCnr := make([]byte, sha256.Size)
+ idCnr.Encode(binCnr)
+
+ cnrNodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), binCnr)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not build container nodes: %w", err)
+ }
+
+ return cnrNodes, nm, nil
+}
+
+type localStorageLoad struct {
+ log *logger.Logger
+
+ engine *engine.StorageEngine
+}
+
+func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontroller.UsedSpaceHandler) error {
+ idList, err := engine.ListContainers(context.TODO(), d.engine)
+ if err != nil {
+ return fmt.Errorf("list containers on engine failure: %w", err)
+ }
+
+ for i := range idList {
+ sz, err := engine.ContainerSize(d.engine, idList[i])
+ if err != nil {
+ d.log.Debug(logs.FrostFSNodeFailedToCalculateContainerSizeInStorageEngine,
+ zap.Stringer("cid", idList[i]),
+ zap.String("error", err.Error()),
+ )
+
+ continue
+ }
+
+ d.log.Debug(logs.FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully,
+ zap.Uint64("size", sz),
+ zap.Stringer("cid", idList[i]),
+ )
+
+ var a containerSDK.SizeEstimation
+ a.SetContainer(idList[i])
+ a.SetValue(sz)
+
+ if f != nil && !f(a) {
+ continue
+ }
+
+ if err := h(a); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type usedSpaceService struct {
+ containerService.Server
+
+ loadWriterProvider loadcontroller.WriterProvider
+
+ loadPlacementBuilder *loadPlacementBuilder
+
+ routeBuilder loadroute.Builder
+
+ cfg *cfg
+}
+
func (c *cfg) PublicKey() []byte {
return nodeKeyFromNetmap(c)
}
@@ -216,40 +517,177 @@ func (c *cfg) ExternalAddresses() []string {
return c.cfgNodeInfo.localInfo.ExternalAddresses()
}
+func (c *usedSpaceService) PublicKey() []byte {
+ return nodeKeyFromNetmap(c.cfg)
+}
+
+func (c *usedSpaceService) IterateAddresses(f func(string) bool) {
+ c.cfg.iterateNetworkAddresses(f)
+}
+
+func (c *usedSpaceService) NumberOfAddresses() int {
+ return c.cfg.addressNum()
+}
+
+func (c *usedSpaceService) ExternalAddresses() []string {
+ return c.cfg.ExternalAddresses()
+}
+
+func (c *usedSpaceService) AnnounceUsedSpace(ctx context.Context, req *containerV2.AnnounceUsedSpaceRequest) (*containerV2.AnnounceUsedSpaceResponse, error) {
+ var passedRoute []loadcontroller.ServerInfo
+
+ for hdr := req.GetVerificationHeader(); hdr != nil; hdr = hdr.GetOrigin() {
+ passedRoute = append(passedRoute, &containerOnlyKeyRemoteServerInfo{
+ key: hdr.GetBodySignature().GetKey(),
+ })
+ }
+
+ for left, right := 0, len(passedRoute)-1; left < right; left, right = left+1, right-1 {
+ passedRoute[left], passedRoute[right] = passedRoute[right], passedRoute[left]
+ }
+
+ passedRoute = append(passedRoute, c)
+
+ w, err := c.loadWriterProvider.InitWriter(passedRoute)
+ if err != nil {
+ return nil, fmt.Errorf("could not initialize container's used space writer: %w", err)
+ }
+
+ var est containerSDK.SizeEstimation
+
+ for _, aV2 := range req.GetBody().GetAnnouncements() {
+ err = est.ReadFromV2(aV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid size announcement: %w", err)
+ }
+
+ if err := c.processLoadValue(ctx, est, passedRoute, w); err != nil {
+ return nil, err
+ }
+ }
+
+ respBody := new(containerV2.AnnounceUsedSpaceResponseBody)
+
+ resp := new(containerV2.AnnounceUsedSpaceResponse)
+ resp.SetBody(respBody)
+
+ c.cfg.respSvc.SetMeta(resp)
+
+ return resp, nil
+}
+
+var errNodeOutsideContainer = errors.New("node outside the container")
+
+type containerOnlyKeyRemoteServerInfo struct {
+ key []byte
+}
+
+func (i *containerOnlyKeyRemoteServerInfo) PublicKey() []byte {
+ return i.key
+}
+
+func (*containerOnlyKeyRemoteServerInfo) IterateAddresses(func(string) bool) {
+}
+
+func (*containerOnlyKeyRemoteServerInfo) NumberOfAddresses() int {
+ return 0
+}
+
+func (*containerOnlyKeyRemoteServerInfo) ExternalAddresses() []string {
+ return nil
+}
+
+func (l *loadPlacementBuilder) isNodeFromContainerKey(epoch uint64, cnr cid.ID, key []byte) (bool, error) {
+ cnrNodes, _, err := l.buildPlacement(epoch, cnr)
+ if err != nil {
+ return false, err
+ }
+
+ for i := range cnrNodes {
+ for j := range cnrNodes[i] {
+ if bytes.Equal(cnrNodes[i][j].PublicKey(), key) {
+ return true, nil
+ }
+ }
+ }
+
+ return false, nil
+}
+
+func (c *usedSpaceService) processLoadValue(_ context.Context, a containerSDK.SizeEstimation,
+ route []loadcontroller.ServerInfo, w loadcontroller.Writer) error {
+ fromCnr, err := c.loadPlacementBuilder.isNodeFromContainerKey(a.Epoch(), a.Container(), route[0].PublicKey())
+ if err != nil {
+ return fmt.Errorf("could not verify that the sender belongs to the container: %w", err)
+ } else if !fromCnr {
+ return errNodeOutsideContainer
+ }
+
+ err = loadroute.CheckRoute(c.routeBuilder, a, route)
+ if err != nil {
+ return fmt.Errorf("wrong route of container's used space value: %w", err)
+ }
+
+ err = w.Put(a)
+ if err != nil {
+ return fmt.Errorf("could not write container's used space value: %w", err)
+ }
+
+ return nil
+}
+
// implements interface required by container service provided by morph executor.
type morphContainerReader struct {
+ eacl containerCore.EACLSource
+
src containerCore.Source
lister interface {
- ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
- IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
+ List(*user.ID) ([]cid.ID, error)
}
}
-func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) {
- return x.src.Get(ctx, id)
+func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) {
+ return x.src.Get(id)
}
-func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) {
- return x.src.DeletionInfo(ctx, id)
+func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) {
+ return x.src.DeletionInfo(id)
}
-func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) {
- return x.lister.ContainersOf(ctx, id)
+func (x *morphContainerReader) GetEACL(id cid.ID) (*containerCore.EACL, error) {
+ return x.eacl.GetEACL(id)
}
-func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error {
- return x.lister.IterateContainersOf(ctx, id, processCID)
+func (x *morphContainerReader) List(id *user.ID) ([]cid.ID, error) {
+ return x.lister.List(id)
}
type morphContainerWriter struct {
neoClient *cntClient.Client
+
+ cacheEnabled bool
+ eacls ttlEACLStorage
}
-func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) {
- return cntClient.Put(ctx, m.neoClient, cnr)
+func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {
+ return cntClient.Put(m.neoClient, cnr)
}
-func (m morphContainerWriter) Delete(ctx context.Context, witness containerCore.RemovalWitness) error {
- return cntClient.Delete(ctx, m.neoClient, witness)
+func (m morphContainerWriter) Delete(witness containerCore.RemovalWitness) error {
+ return cntClient.Delete(m.neoClient, witness)
+}
+
+func (m morphContainerWriter) PutEACL(eaclInfo containerCore.EACL) error {
+ err := cntClient.PutEACL(m.neoClient, eaclInfo)
+ if err != nil {
+ return err
+ }
+
+ if m.cacheEnabled {
+ id, _ := eaclInfo.Value.CID()
+ m.eacls.InvalidateEACL(id)
+ }
+
+ return nil
}
diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go
index 1825013c7..787fe3351 100644
--- a/cmd/frostfs-node/control.go
+++ b/cmd/frostfs-node/control.go
@@ -2,24 +2,29 @@ package main
import (
"context"
- "fmt"
"net"
controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
- metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
- tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
"google.golang.org/grpc"
)
const serviceNameControl = "control"
-func initControlService(ctx context.Context, c *cfg) {
+type treeSynchronizer struct {
+ treeSvc *tree.Service
+}
+
+func (t treeSynchronizer) Synchronize(ctx context.Context, cnr cid.ID, treeID string) error {
+ return t.treeSvc.SynchronizeTree(ctx, cnr, treeID)
+}
+
+func initControlService(c *cfg) {
endpoint := controlconfig.GRPC(c.appCfg).Endpoint()
if endpoint == controlconfig.GRPCEndpointDefault {
return
@@ -43,34 +48,28 @@ func initControlService(ctx context.Context, c *cfg) {
controlSvc.WithReplicator(c.replicator),
controlSvc.WithNodeState(c),
controlSvc.WithLocalStorage(c.cfgObject.cfgLocalStorage.localStorage),
- controlSvc.WithTreeService(c.treeService),
- controlSvc.WithLocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine),
+ controlSvc.WithTreeService(treeSynchronizer{
+ c.treeService,
+ }),
)
lis, err := net.Listen("tcp", endpoint)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
return
}
- c.cfgControlService.server = grpc.NewServer(
- grpc.ChainUnaryInterceptor(
- qos.NewSetCriticalIOTagUnaryServerInterceptor(),
- metrics.NewUnaryServerInterceptor(),
- tracing.NewUnaryServerInterceptor(),
- ),
- // control service has no stream methods, so no stream interceptors added
- )
+ c.cfgControlService.server = grpc.NewServer()
c.onShutdown(func() {
- stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)
+ stopGRPC("FrostFS Control API", c.cfgControlService.server, c.log)
})
control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc)
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) {
- c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
+ c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", serviceNameControl),
zap.String("endpoint", endpoint))
fatalOnErr(c.cfgControlService.server.Serve(lis))
@@ -82,47 +81,14 @@ func (c *cfg) NetmapStatus() control.NetmapStatus {
return c.cfgNetmap.state.controlNetmapStatus()
}
-func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) {
- c.notifySystemd(ctx, st)
+func (c *cfg) setHealthStatus(st control.HealthStatus) {
c.healthStatus.Store(int32(st))
- c.metricsCollector.State().SetHealth(int32(st))
-}
-func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
- if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
- c.notifySystemd(ctx, newSt)
- c.metricsCollector.State().SetHealth(int32(newSt))
+ if c.metricsCollector != nil {
+ c.metricsCollector.State().SetHealth(int32(st))
}
- return
-}
-
-func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) {
- old = control.HealthStatus(c.healthStatus.Swap(int32(st)))
- c.notifySystemd(ctx, st)
- c.metricsCollector.State().SetHealth(int32(st))
- return
}
func (c *cfg) HealthStatus() control.HealthStatus {
return control.HealthStatus(c.healthStatus.Load())
}
-
-func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) {
- if !c.sdNotify {
- return
- }
- var err error
- switch st {
- case control.HealthStatus_READY:
- err = sdnotify.FlagAndStatus(sdnotify.ReadyEnabled)
- case control.HealthStatus_SHUTTING_DOWN:
- err = sdnotify.FlagAndStatus(sdnotify.StoppingEnabled)
- case control.HealthStatus_RECONFIGURING:
- err = sdnotify.FlagAndStatus(sdnotify.ReloadingEnabled)
- default:
- err = sdnotify.Status(fmt.Sprintf("%v", st))
- }
- if err != nil {
- c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
- }
-}
diff --git a/cmd/frostfs-node/frostfsid.go b/cmd/frostfs-node/frostfsid.go
deleted file mode 100644
index d2d4e9785..000000000
--- a/cmd/frostfs-node/frostfsid.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package main
-
-import (
- "context"
- "strings"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
- "github.com/hashicorp/golang-lru/v2/expirable"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-type subjectWithError struct {
- subject *client.Subject
- err error
-}
-
-type subjectExtWithError struct {
- subject *client.SubjectExtended
- err error
-}
-
-type morphFrostfsIDCache struct {
- subjProvider frostfsidcore.SubjectProvider
-
- subjCache *expirable.LRU[util.Uint160, subjectWithError]
-
- subjExtCache *expirable.LRU[util.Uint160, subjectExtWithError]
-
- metrics cacheMetrics
-}
-
-func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration, metrics cacheMetrics) frostfsidcore.SubjectProvider {
- return &morphFrostfsIDCache{
- subjProvider: subjProvider,
-
- subjCache: expirable.NewLRU(size, func(util.Uint160, subjectWithError) {}, ttl),
-
- subjExtCache: expirable.NewLRU(size, func(util.Uint160, subjectExtWithError) {}, ttl),
-
- metrics: metrics,
- }
-}
-
-func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
- hit := false
- startedAt := time.Now()
- defer func() {
- m.metrics.AddMethodDuration("GetSubject", time.Since(startedAt), hit)
- }()
-
- result, found := m.subjCache.Get(addr)
- if found {
- hit = true
- return result.subject, result.err
- }
-
- subj, err := m.subjProvider.GetSubject(ctx, addr)
- if err != nil {
- if m.isCacheableError(err) {
- m.subjCache.Add(addr, subjectWithError{
- err: err,
- })
- }
- return nil, err
- }
-
- m.subjCache.Add(addr, subjectWithError{subject: subj})
- return subj, nil
-}
-
-func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
- hit := false
- startedAt := time.Now()
- defer func() {
- m.metrics.AddMethodDuration("GetSubjectExtended", time.Since(startedAt), hit)
- }()
-
- result, found := m.subjExtCache.Get(addr)
- if found {
- hit = true
- return result.subject, result.err
- }
-
- subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr)
- if err != nil {
- if m.isCacheableError(err) {
- m.subjExtCache.Add(addr, subjectExtWithError{
- err: err,
- })
- m.subjCache.Add(addr, subjectWithError{
- err: err,
- })
- }
- return nil, err
- }
-
- m.subjExtCache.Add(addr, subjectExtWithError{subject: subjExt})
- m.subjCache.Add(addr, subjectWithError{subject: subjectFromSubjectExtended(subjExt)})
-
- return subjExt, nil
-}
-
-func (m *morphFrostfsIDCache) isCacheableError(err error) bool {
- return strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage)
-}
-
-func subjectFromSubjectExtended(subjExt *client.SubjectExtended) *client.Subject {
- return &client.Subject{
- PrimaryKey: subjExt.PrimaryKey,
- AdditionalKeys: subjExt.AdditionalKeys,
- Namespace: subjExt.Namespace,
- Name: subjExt.Name,
- KV: subjExt.KV,
- }
-}
diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go
index 6b6d44750..74df7a18f 100644
--- a/cmd/frostfs-node/grpc.go
+++ b/cmd/frostfs-node/grpc.go
@@ -1,7 +1,6 @@
package main
import (
- "context"
"crypto/tls"
"errors"
"fmt"
@@ -9,14 +8,10 @@ import (
"time"
grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
- rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
- qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
@@ -24,198 +19,114 @@ import (
const maxRecvMsgSize = 256 << 20
-func initGRPC(ctx context.Context, c *cfg) {
- var endpointsToReconnect []string
+func initGRPC(c *cfg) {
var successCount int
grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
- serverOpts, ok := getGrpcServerOpts(ctx, c, sc)
- if !ok {
- return
+ serverOpts := []grpc.ServerOption{
+ grpc.MaxRecvMsgSize(maxRecvMsgSize),
+ grpc.ChainUnaryInterceptor(
+ metrics.NewUnaryServerInterceptor(),
+ tracing.NewUnaryServerInterceptor(),
+ ),
+ grpc.ChainStreamInterceptor(
+ metrics.NewStreamServerInterceptor(),
+ tracing.NewStreamServerInterceptor(),
+ ),
+ }
+
+ tlsCfg := sc.TLS()
+
+ if tlsCfg != nil {
+ cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
+ return
+ }
+
+ var cipherSuites []uint16
+ if !tlsCfg.UseInsecureCrypto() {
+ // This more or less follows the list in https://wiki.mozilla.org/Security/Server_Side_TLS
+ // excluding:
+ // 1. TLS 1.3 suites need not be specified here.
+ // 2. Suites that use DH key exchange are not implemented by stdlib.
+ cipherSuites = []uint16{
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
+ tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+ }
+ }
+ creds := credentials.NewTLS(&tls.Config{
+ MinVersion: tls.VersionTLS12,
+ CipherSuites: cipherSuites,
+ Certificates: []tls.Certificate{cert},
+ })
+
+ serverOpts = append(serverOpts, grpc.Creds(creds))
}
lis, err := net.Listen("tcp", sc.Endpoint())
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
- c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
- endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint())
+ c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
return
}
c.metricsCollector.GrpcServerMetrics().MarkHealthy(sc.Endpoint())
+ c.cfgGRPC.listeners = append(c.cfgGRPC.listeners, lis)
+ c.cfgGRPC.endpoints = append(c.cfgGRPC.endpoints, sc.Endpoint())
+
srv := grpc.NewServer(serverOpts...)
c.onShutdown(func() {
- stopGRPC(ctx, "FrostFS Public API", srv, c.log)
+ stopGRPC("FrostFS Public API", srv, c.log)
})
- c.cfgGRPC.append(sc.Endpoint(), lis, srv)
+ c.cfgGRPC.servers = append(c.cfgGRPC.servers, srv)
successCount++
})
if successCount == 0 {
fatalOnErr(errors.New("could not listen to any gRPC endpoints"))
}
- c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg)
-
- for _, endpoint := range endpointsToReconnect {
- scheduleReconnect(ctx, endpoint, c)
- }
}
-func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
- c.wg.Add(1)
- go func() {
- defer c.wg.Done()
-
- t := time.NewTicker(c.cfgGRPC.reconnectTimeout)
- for {
- select {
- case <-t.C:
- if tryReconnect(ctx, endpoint, c) {
- return
- }
- case <-c.done:
- return
- }
- }
- }()
-}
-
-func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool {
- c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
-
- serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c)
- if !found {
- c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
- return true
- }
-
- lis, err := net.Listen("tcp", endpoint)
- if err != nil {
- c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
- c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
- c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
- return false
- }
- c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint)
-
- srv := grpc.NewServer(serverOpts...)
-
- c.onShutdown(func() {
- stopGRPC(ctx, "FrostFS Public API", srv, c.log)
- })
-
- c.cfgGRPC.appendAndHandle(endpoint, lis, srv)
-
- c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
- return true
-}
-
-func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
- unlock := c.LockAppConfigShared()
- defer unlock()
- grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
- if found {
- return
- }
- if sc.Endpoint() != endpoint {
- return
- }
- var ok bool
- result, ok = getGrpcServerOpts(ctx, c, sc)
- if !ok {
- return
- }
- found = true
- })
- return
-}
-
-func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
- serverOpts := []grpc.ServerOption{
- grpc.MaxRecvMsgSize(maxRecvMsgSize),
- grpc.ChainUnaryInterceptor(
- qos.NewUnaryServerInterceptor(),
- metrics.NewUnaryServerInterceptor(),
- tracing.NewUnaryServerInterceptor(),
- qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
- ),
- grpc.ChainStreamInterceptor(
- qos.NewStreamServerInterceptor(),
- metrics.NewStreamServerInterceptor(),
- tracing.NewStreamServerInterceptor(),
- qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
- ),
- }
-
- tlsCfg := sc.TLS()
-
- if tlsCfg != nil {
- cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
- if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
- return nil, false
- }
-
- var cipherSuites []uint16
- if !tlsCfg.UseInsecureCrypto() {
- // This more or less follows the list in https://wiki.mozilla.org/Security/Server_Side_TLS
- // excluding:
- // 1. TLS 1.3 suites need not be specified here.
- // 2. Suites that use DH key exchange are not implemented by stdlib.
- cipherSuites = []uint16{
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
- }
- }
- creds := credentials.NewTLS(&tls.Config{
- MinVersion: tls.VersionTLS12,
- CipherSuites: cipherSuites,
- Certificates: []tls.Certificate{cert},
- })
-
- serverOpts = append(serverOpts, grpc.Creds(creds))
- }
-
- return serverOpts, true
-}
-
-func serveGRPC(ctx context.Context, c *cfg) {
- c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) {
+func serveGRPC(c *cfg) {
+ for i := range c.cfgGRPC.servers {
c.wg.Add(1)
+ srv := c.cfgGRPC.servers[i]
+ lis := c.cfgGRPC.listeners[i]
+ endpoint := c.cfgGRPC.endpoints[i]
+
go func() {
defer func() {
- c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint,
- zap.Stringer("endpoint", l.Addr()),
+ c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
+ zap.Stringer("endpoint", lis.Addr()),
)
c.wg.Done()
}()
- c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
+ c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", "gRPC"),
- zap.Stringer("endpoint", l.Addr()),
+ zap.Stringer("endpoint", lis.Addr()),
)
- if err := s.Serve(l); err != nil {
- c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e)
- c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err))
- c.cfgGRPC.dropConnection(e)
- scheduleReconnect(ctx, e, c)
+ if err := srv.Serve(lis); err != nil {
+ c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
+ fmt.Println("gRPC server error", err)
}
}()
- })
+ }
}
-func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) {
- l = l.With(zap.String("name", name))
+func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
+ l = &logger.Logger{Logger: l.With(zap.String("name", name))}
- l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer)
+ l.Info(logs.FrostFSNodeStoppingGRPCServer)
// GracefulStop() may freeze forever, see #1270
done := make(chan struct{})
@@ -227,60 +138,9 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger
select {
case <-done:
case <-time.After(1 * time.Minute):
- l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
+ l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
s.Stop()
}
- l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
-}
-
-func initRPCLimiter(c *cfg) error {
- var limits []limiting.KeyLimit
- for _, l := range rpcconfig.Limits(c.appCfg) {
- limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps})
- }
-
- if err := validateRPCLimits(c, limits); err != nil {
- return fmt.Errorf("validate RPC limits: %w", err)
- }
-
- limiter, err := limiting.NewSemaphoreLimiter(limits)
- if err != nil {
- return fmt.Errorf("create RPC limiter: %w", err)
- }
-
- c.cfgGRPC.limiter.Store(limiter)
- return nil
-}
-
-func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error {
- availableMethods := getAvailableMethods(c.cfgGRPC.servers)
- for _, limit := range limits {
- for _, method := range limit.Keys {
- if _, ok := availableMethods[method]; !ok {
- return fmt.Errorf("set limit on an unknown method %q", method)
- }
- }
- }
- return nil
-}
-
-func getAvailableMethods(servers []grpcServer) map[string]struct{} {
- res := make(map[string]struct{})
- for _, server := range servers {
- for _, method := range getMethodsForServer(server.Server) {
- res[method] = struct{}{}
- }
- }
- return res
-}
-
-func getMethodsForServer(server *grpc.Server) []string {
- var res []string
- for service, info := range server.GetServiceInfo() {
- for _, method := range info.Methods {
- res = append(res, fmt.Sprintf("/%s/%s", service, method.Name))
- }
- }
- return res
+ l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
}
diff --git a/cmd/frostfs-node/httpcomponent.go b/cmd/frostfs-node/httpcomponent.go
index 7346206ef..6e8fa8f2c 100644
--- a/cmd/frostfs-node/httpcomponent.go
+++ b/cmd/frostfs-node/httpcomponent.go
@@ -2,6 +2,7 @@ package main
import (
"context"
+ "fmt"
"net/http"
"time"
@@ -20,9 +21,9 @@ type httpComponent struct {
preReload func(c *cfg)
}
-func (cmp *httpComponent) init(ctx context.Context, c *cfg) {
+func (cmp *httpComponent) init(c *cfg) {
if !cmp.enabled {
- c.log.Info(ctx, cmp.name+" is disabled")
+ c.log.Info(fmt.Sprintf("%s is disabled", cmp.name))
return
}
// Init server with parameters
@@ -35,18 +36,20 @@ func (cmp *httpComponent) init(ctx context.Context, c *cfg) {
cmp.shutdownDur,
),
)
- c.wg.Add(1)
- go func() {
- defer c.wg.Done()
-
- c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
- zap.String("service", cmp.name),
- zap.String("endpoint", cmp.address))
- fatalOnErr(srv.Serve())
- }()
c.closers = append(c.closers, closer{
cmp.name,
- func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) },
+ func() { stopAndLog(c, cmp.name, srv.Shutdown) },
+ })
+ c.workers = append(c.workers, worker{
+ cmp.name,
+ func(ctx context.Context) {
+ runAndLog(ctx, c, cmp.name, false, func(context.Context, *cfg) {
+ c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+ zap.String("service", cmp.name),
+ zap.String("endpoint", cmp.address))
+ fatalOnErr(srv.Serve())
+ })
+ },
})
}
@@ -61,8 +64,9 @@ func (cmp *httpComponent) reload(ctx context.Context) error {
}
// Cleanup
delCloser(cmp.cfg, cmp.name)
+ delWorker(cmp.cfg, cmp.name)
// Init server with new parameters
- cmp.init(ctx, cmp.cfg)
+ cmp.init(cmp.cfg)
// Start worker
if cmp.enabled {
startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name))
diff --git a/cmd/frostfs-node/keyspaceiterator.go b/cmd/frostfs-node/keyspaceiterator.go
index 09a8f7f73..e7214aacb 100644
--- a/cmd/frostfs-node/keyspaceiterator.go
+++ b/cmd/frostfs-node/keyspaceiterator.go
@@ -13,7 +13,7 @@ type keySpaceIterator struct {
cur *engine.Cursor
}
-func (it *keySpaceIterator) Next(ctx context.Context, batchSize uint32) ([]objectcore.Info, error) {
+func (it *keySpaceIterator) Next(ctx context.Context, batchSize uint32) ([]objectcore.AddressWithType, error) {
var prm engine.ListWithCursorPrm
prm.WithCursor(it.cur)
prm.WithCount(batchSize)
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index 0228d2a10..88032ebdb 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -6,7 +6,6 @@ import (
"fmt"
"log"
"os"
- "sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -61,21 +60,21 @@ func main() {
var ctx context.Context
ctx, c.ctxCancel = context.WithCancel(context.Background())
- c.setHealthStatus(ctx, control.HealthStatus_STARTING)
-
initApp(ctx, c)
+ c.setHealthStatus(control.HealthStatus_STARTING)
+
bootUp(ctx, c)
- c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY)
+ c.setHealthStatus(control.HealthStatus_READY)
wait(c)
}
-func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) {
- c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name))
+func initAndLog(c *cfg, name string, initializer func(*cfg)) {
+ c.log.Info(fmt.Sprintf("initializing %s service...", name))
initializer(c)
- c.log.Info(ctx, name+" service has been successfully initialized")
+ c.log.Info(fmt.Sprintf("%s service has been successfully initialized", name))
}
func initApp(ctx context.Context, c *cfg) {
@@ -85,95 +84,73 @@ func initApp(ctx context.Context, c *cfg) {
c.wg.Done()
}()
- setRuntimeParameters(ctx, c)
+ setRuntimeParameters(c)
metrics, _ := metricsComponent(c)
- initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) })
- initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) })
+ initAndLog(c, "profiler", initProfilerService)
+ initAndLog(c, metrics.name, metrics.init)
- initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) })
+ initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) })
initLocalStorage(ctx, c)
- initAndLog(ctx, c, "storage engine", func(c *cfg) {
+ initAndLog(c, "storage engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx))
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx))
})
- initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
- initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
- initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) })
+ initAndLog(c, "gRPC", initGRPC)
+ initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
+ initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
+ initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
+ initAndLog(c, "session", initSessionService)
+ initAndLog(c, "notification", func(c *cfg) { initNotifications(ctx, c) })
+ initAndLog(c, "object", initObjectService)
+ initAndLog(c, "tree", initTreeService)
+ initAndLog(c, "control", initControlService)
- initAccessPolicyEngine(ctx, c)
- initAndLog(ctx, c, "access policy engine", func(c *cfg) {
- fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx))
- fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init())
- })
-
- initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
- initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) })
- initAndLog(ctx, c, "session", initSessionService)
- initAndLog(ctx, c, "object", initObjectService)
- initAndLog(ctx, c, "tree", initTreeService)
- initAndLog(ctx, c, "apemanager", initAPEManagerService)
- initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })
-
- initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) })
-
- initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
+ initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
}
func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
- c.log.Info(ctx, fmt.Sprintf("starting %s service...", name))
+ c.log.Info(fmt.Sprintf("starting %s service...", name))
starter(ctx, c)
if logSuccess {
- c.log.Info(ctx, name+" service started successfully")
+ c.log.Info(fmt.Sprintf("%s service started successfully", name))
}
}
-func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) {
- c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name))
+func stopAndLog(c *cfg, name string, stopper func() error) {
+ c.log.Debug(fmt.Sprintf("shutting down %s service", name))
- err := stopper(ctx)
+ err := stopper()
if err != nil {
- c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
- zap.Error(err),
+ c.log.Debug(fmt.Sprintf("could not shutdown %s server", name),
+ zap.String("error", err.Error()),
)
}
- c.log.Debug(ctx, name+" service has been stopped")
+ c.log.Debug(fmt.Sprintf("%s service has been stopped", name))
}
func bootUp(ctx context.Context, c *cfg) {
- runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) })
+ runAndLog(ctx, c, "NATS", true, connectNats)
+ runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) })
runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit)
- bootstrapNode(ctx, c)
+ bootstrapNode(c)
startWorkers(ctx, c)
}
func wait(c *cfg) {
- c.log.Info(context.Background(), logs.CommonApplicationStarted,
+ c.log.Info(logs.CommonApplicationStarted,
zap.String("version", misc.Version))
<-c.done // graceful shutdown
- drain := &sync.WaitGroup{}
- drain.Add(1)
- go func() {
- defer drain.Done()
- for err := range c.internalErr {
- c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError,
- zap.String("message", err.Error()))
- }
- }()
-
- c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop)
+ c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
c.wg.Wait()
-
- close(c.internalErr)
- drain.Wait()
}
func (c *cfg) onShutdown(f func()) {
diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go
index d9ca01e70..19b4af51f 100644
--- a/cmd/frostfs-node/metrics.go
+++ b/cmd/frostfs-node/metrics.go
@@ -8,38 +8,38 @@ import (
func metricsComponent(c *cfg) (*httpComponent, bool) {
var updated bool
// check if it has been inited before
- if c.metrics == nil {
- c.metrics = new(httpComponent)
- c.metrics.cfg = c
- c.metrics.name = "metrics"
- c.metrics.handler = metrics.Handler()
+ if c.dynamicConfiguration.metrics == nil {
+ c.dynamicConfiguration.metrics = new(httpComponent)
+ c.dynamicConfiguration.metrics.cfg = c
+ c.dynamicConfiguration.metrics.name = "metrics"
+ c.dynamicConfiguration.metrics.handler = metrics.Handler()
updated = true
}
// (re)init read configuration
enabled := metricsconfig.Enabled(c.appCfg)
- if enabled != c.metrics.enabled {
- c.metrics.enabled = enabled
+ if enabled != c.dynamicConfiguration.metrics.enabled {
+ c.dynamicConfiguration.metrics.enabled = enabled
updated = true
}
address := metricsconfig.Address(c.appCfg)
- if address != c.metrics.address {
- c.metrics.address = address
+ if address != c.dynamicConfiguration.metrics.address {
+ c.dynamicConfiguration.metrics.address = address
updated = true
}
dur := metricsconfig.ShutdownTimeout(c.appCfg)
- if dur != c.metrics.shutdownDur {
- c.metrics.shutdownDur = dur
+ if dur != c.dynamicConfiguration.metrics.shutdownDur {
+ c.dynamicConfiguration.metrics.shutdownDur = dur
updated = true
}
- return c.metrics, updated
+ return c.dynamicConfiguration.metrics, updated
}
func enableMetricsSvc(c *cfg) {
- c.metricsSvc.Enable()
+ c.shared.metricsSvc.Enable()
}
func disableMetricsSvc(c *cfg) {
- c.metricsSvc.Disable()
+ c.shared.metricsSvc.Disable()
}
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 917cf6fc0..63d1605ef 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -9,12 +9,12 @@ import (
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -24,56 +24,12 @@ import (
const (
newEpochNotification = "NewEpoch"
+
+ // amount of tries(blocks) before notary deposit timeout.
+ notaryDepositRetriesAmount = 300
)
-func (c *cfg) initMorphComponents(ctx context.Context) {
- c.cfgMorph.guard.Lock()
- defer c.cfgMorph.guard.Unlock()
- if c.cfgMorph.initialized {
- return
- }
- initMorphClient(ctx, c)
-
- lookupScriptHashesInNNS(c) // smart contract auto negotiation
-
- err := c.cfgMorph.client.EnableNotarySupport(
- client.WithProxyContract(
- c.cfgMorph.proxyScriptHash,
- ),
- )
- fatalOnErr(err)
-
- c.log.Info(ctx, logs.FrostFSNodeNotarySupport)
-
- wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0)
- fatalOnErr(err)
-
- var netmapSource netmap.Source
-
- c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg)
- c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg)
-
- if c.cfgMorph.cacheTTL == 0 {
- msPerBlock, err := c.cfgMorph.client.MsPerBlock()
- fatalOnErr(err)
- c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
- c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
- }
-
- if c.cfgMorph.cacheTTL < 0 {
- netmapSource = newRawNetmapStorage(wrap)
- } else {
- // use RPC node as source of netmap (with caching)
- netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg,
- morphconfig.NetmapCandidatesPollInterval(c.appCfg))
- }
-
- c.netMapSource = netmapSource
- c.cfgNetmap.wrapper = wrap
- c.cfgMorph.initialized = true
-}
-
-func initMorphClient(ctx context.Context, c *cfg) {
+func initMorphComponents(ctx context.Context, c *cfg) {
addresses := morphconfig.RPCEndpoint(c.appCfg)
// Morph client stable-sorts endpoints by priority. Shuffle here to randomize
@@ -85,54 +41,98 @@ func initMorphClient(ctx context.Context, c *cfg) {
cli, err := client.New(ctx,
c.key,
client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)),
- client.WithLogger(c.log.WithTag(logger.TagMorph)),
- client.WithMetrics(c.metricsCollector.MorphClientMetrics()),
+ client.WithLogger(c.log),
+ client.WithMetrics(metrics.NewMorphClientMetrics()),
client.WithEndpoints(addresses...),
client.WithConnLostCallback(func() {
c.internalErr <- errors.New("morph connection has been lost")
}),
client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
- client.WithMorphCacheMetrics(c.metricsCollector.MorphCacheMetrics()),
- client.WithDialerSource(c.dialerSource),
+ client.WithMorphCacheMetrics(metrics.NewNodeMorphCacheMetrics()),
)
if err != nil {
- c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
+ c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
fatalOnErr(err)
}
c.onShutdown(func() {
- c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents)
+ c.log.Info(logs.FrostFSNodeClosingMorphComponents)
cli.Close()
})
if err := cli.SetGroupSignerScope(); err != nil {
- c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
+ c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
}
c.cfgMorph.client = cli
+ c.cfgMorph.notaryEnabled = cli.ProbeNotary()
+
+ lookupScriptHashesInNNS(c) // smart contract auto negotiation
+
+ if c.cfgMorph.notaryEnabled {
+ err = c.cfgMorph.client.EnableNotarySupport(
+ client.WithProxyContract(
+ c.cfgMorph.proxyScriptHash,
+ ),
+ )
+ fatalOnErr(err)
+ }
+
+ c.log.Info(logs.FrostFSNodeNotarySupport,
+ zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
+ )
+
+ wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary())
+ fatalOnErr(err)
+
+ var netmapSource netmap.Source
+
+ c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg)
+
+ if c.cfgMorph.cacheTTL == 0 {
+ msPerBlock, err := c.cfgMorph.client.MsPerBlock()
+ fatalOnErr(err)
+ c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
+ c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
+ }
+
+ if c.cfgMorph.cacheTTL < 0 {
+ netmapSource = wrap
+ } else {
+ // use RPC node as source of netmap (with caching)
+ netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap)
+ }
+
+ c.netMapSource = netmapSource
+ c.cfgNetmap.wrapper = wrap
}
func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
- tx, vub, err := makeNotaryDeposit(ctx, c)
+ // skip notary deposit in non-notary environments
+ if !c.cfgMorph.notaryEnabled {
+ return
+ }
+
+ tx, err := makeNotaryDeposit(c)
fatalOnErr(err)
if tx.Equals(util.Uint256{}) {
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait it.
- c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
+ c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
return
}
- err = waitNotaryDeposit(ctx, c, tx, vub)
+ err = waitNotaryDeposit(ctx, c, tx)
fatalOnErr(err)
}
-func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) {
+func makeNotaryDeposit(c *cfg) (util.Uint256, error) {
const (
// gasMultiplier defines how many times more the notary
// balance must be compared to the GAS balance of the node:
@@ -146,19 +146,41 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error
depositAmount, err := client.CalculateNotaryDepositAmount(c.cfgMorph.client, gasMultiplier, gasDivisor)
if err != nil {
- return util.Uint256{}, 0, fmt.Errorf("could not calculate notary deposit: %w", err)
+ return util.Uint256{}, fmt.Errorf("could not calculate notary deposit: %w", err)
}
- return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount)
+ return c.cfgMorph.client.DepositEndlessNotary(depositAmount)
}
-func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
- if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil {
- return err
+var (
+ errNotaryDepositFail = errors.New("notary deposit tx has faulted")
+ errNotaryDepositTimeout = errors.New("notary deposit tx has not appeared in the network")
+)
+
+func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error {
+ for i := 0; i < notaryDepositRetriesAmount; i++ {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ ok, err := c.cfgMorph.client.TxHalt(tx)
+ if err == nil {
+ if ok {
+ return nil
+ }
+
+ return errNotaryDepositFail
+ }
+
+ err = c.cfgMorph.client.Wait(ctx, 1)
+ if err != nil {
+ return fmt.Errorf("could not wait for one block in chain: %w", err)
+ }
}
- c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
- return nil
+ return errNotaryDepositTimeout
}
func listenMorphNotifications(ctx context.Context, c *cfg) {
@@ -166,31 +188,26 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
err error
subs subscriber.Subscriber
)
- log := c.log.WithTag(logger.TagMorph)
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
- c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
+ c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
subs, err = subscriber.New(ctx, &subscriber.Params{
- Log: log,
+ Log: c.log,
StartFromBlock: fromSideChainBlock,
Client: c.cfgMorph.client,
})
fatalOnErr(err)
lis, err := event.NewListener(event.ListenerParams{
- Logger: log,
+ Logger: c.log,
Subscriber: subs,
})
fatalOnErr(err)
- c.onShutdown(func() {
- lis.Stop()
- })
-
c.workers = append(c.workers, newWorkerFromFunc(func(wCtx context.Context) {
runAndLog(wCtx, c, "morph notification", false, func(lCtx context.Context, c *cfg) {
lis.ListenWithError(lCtx, c.internalErr)
@@ -200,7 +217,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
res, err := netmapEvent.ParseNewEpoch(src)
if err == nil {
- log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain,
+ c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
)
}
@@ -210,12 +227,12 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers)
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
- registerBlockHandler(lis, func(ctx context.Context, block *block.Block) {
- log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
+ registerBlockHandler(lis, func(block *block.Block) {
+ c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
if err != nil {
- log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState,
+ c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", block.Index))
}
@@ -223,20 +240,29 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
}
func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parsers map[event.Type]event.NotificationParser,
- subs map[event.Type][]event.Handler,
-) {
+ subs map[event.Type][]event.Handler) {
for typ, handlers := range subs {
+ pi := event.NotificationParserInfo{}
+ pi.SetType(typ)
+ pi.SetScriptHash(scHash)
+
p, ok := parsers[typ]
if !ok {
panic(fmt.Sprintf("missing parser for event %s", typ))
}
- lis.RegisterNotificationHandler(event.NotificationHandlerInfo{
- Contract: scHash,
- Type: typ,
- Parser: p,
- Handlers: handlers,
- })
+ pi.SetParser(p)
+
+ lis.SetNotificationParser(pi)
+
+ for _, h := range handlers {
+ hi := event.NotificationHandlerInfo{}
+ hi.SetType(typ)
+ hi.SetScriptHash(scHash)
+ hi.SetHandler(h)
+
+ lis.RegisterNotificationHandler(hi)
+ }
}
}
@@ -258,13 +284,15 @@ func lookupScriptHashesInNNS(c *cfg) {
{&c.cfgNetmap.scriptHash, client.NNSNetmapContractName},
{&c.cfgAccounting.scriptHash, client.NNSBalanceContractName},
{&c.cfgContainer.scriptHash, client.NNSContainerContractName},
- {&c.cfgFrostfsID.scriptHash, client.NNSFrostFSIDContractName},
{&c.cfgMorph.proxyScriptHash, client.NNSProxyContractName},
- {&c.cfgObject.cfgAccessPolicyEngine.policyContractHash, client.NNSPolicyContractName},
}
)
for _, t := range targets {
+ if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled {
+ continue // ignore proxy contract if notary disabled
+ }
+
if emptyHash.Equals(*t.h) {
*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index 7dfb4fe12..ebe152e47 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -5,13 +5,12 @@ import (
"context"
"errors"
"fmt"
- "net"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -19,11 +18,9 @@ import (
netmapTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/netmap/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
netmapService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap"
- netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"go.uber.org/zap"
- "google.golang.org/grpc"
)
// primary solution of local network state dump.
@@ -32,7 +29,7 @@ type networkState struct {
controlNetStatus atomic.Int32 // control.NetmapStatus
- nodeInfo atomic.Value // netmapSDK.NodeInfo
+ nodeInfo atomic.Value // *netmapSDK.NodeInfo
metrics *metrics.NodeMetrics
}
@@ -62,15 +59,13 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
if ni != nil {
s.nodeInfo.Store(*ni)
- switch ni.Status() {
- case netmapSDK.Online:
+ switch {
+ case ni.IsOnline():
ctrlNetSt = control.NetmapStatus_ONLINE
- case netmapSDK.Offline:
+ case ni.IsOffline():
ctrlNetSt = control.NetmapStatus_OFFLINE
- case netmapSDK.Maintenance:
+ case ni.IsMaintenance():
ctrlNetSt = control.NetmapStatus_MAINTENANCE
- case netmapSDK.UnspecifiedState:
- ctrlNetSt = control.NetmapStatus_STATUS_UNDEFINED
}
} else {
ctrlNetSt = control.NetmapStatus_OFFLINE
@@ -81,7 +76,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
// nil ni means that the node is not included
// in the netmap
- niOld.SetStatus(netmapSDK.Offline)
+ niOld.SetOffline()
s.nodeInfo.Store(niOld)
}
@@ -105,7 +100,9 @@ func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) {
v := s.nodeInfo.Load()
if v != nil {
res, ok = v.(netmapSDK.NodeInfo)
- assert.True(ok, fmt.Sprintf("unexpected value in atomic node info state: %T", v))
+ if !ok {
+ panic(fmt.Sprintf("unexpected value in atomic node info state: %T", v))
+ }
}
return
@@ -123,11 +120,7 @@ func nodeKeyFromNetmap(c *cfg) []byte {
func (c *cfg) iterateNetworkAddresses(f func(string) bool) {
ni, ok := c.cfgNetmap.state.getNodeInfo()
if ok {
- for s := range ni.NetworkEndpoints() {
- if f(s) {
- return
- }
- }
+ ni.IterateNetworkEndpoints(f)
}
}
@@ -144,11 +137,13 @@ func initNetmapService(ctx context.Context, c *cfg) {
network.WriteToNodeInfo(c.localAddr, &c.cfgNodeInfo.localInfo)
c.cfgNodeInfo.localInfo.SetPublicKey(c.key.PublicKey().Bytes())
parseAttributes(c)
- c.cfgNodeInfo.localInfo.SetStatus(netmapSDK.Offline)
+ c.cfgNodeInfo.localInfo.SetOffline()
- c.initMorphComponents(ctx)
+ if c.cfgMorph.client == nil {
+ initMorphComponents(ctx, c)
+ }
- initNetmapState(ctx, c)
+ initNetmapState(c)
server := netmapTransportGRPC.New(
netmapService.NewSignService(
@@ -167,54 +162,67 @@ func initNetmapService(ctx context.Context, c *cfg) {
),
)
- c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
- netmapGRPC.RegisterNetmapServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(netmapGRPC.NetmapService_ServiceDesc), server)
- })
+ for _, srv := range c.cfgGRPC.servers {
+ netmapGRPC.RegisterNetmapServiceServer(srv, server)
+ }
addNewEpochNotificationHandlers(c)
}
func addNewEpochNotificationHandlers(c *cfg) {
- addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
+ addNewEpochNotificationHandler(c, func(ev event.Event) {
c.cfgNetmap.state.setCurrentEpoch(ev.(netmapEvent.NewEpoch).EpochNumber())
})
- addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
- e := ev.(netmapEvent.NewEpoch).EpochNumber()
-
- c.updateContractNodeInfo(ctx, e)
-
- if c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470
+ addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
+ if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470
return
}
- if err := c.bootstrap(ctx); err != nil {
- c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
+ if err := c.bootstrap(); err != nil {
+ c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
}
})
- addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
- _, _, err := makeNotaryDeposit(ctx, c)
+ addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
+ e := ev.(netmapEvent.NewEpoch).EpochNumber()
+
+ ni, err := c.netmapLocalNodeState(e)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
- zap.Error(err),
+ c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
+ zap.Uint64("epoch", e),
+ zap.String("error", err.Error()),
)
+
+ return
}
+
+ c.handleLocalNodeInfo(ni)
})
+
+ if c.cfgMorph.notaryEnabled {
+ addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
+ _, err := makeNotaryDeposit(c)
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
+ zap.String("error", err.Error()),
+ )
+ }
+ })
+ }
}
// bootstrapNode adds current node to the Network map.
// Must be called after initNetmapService.
-func bootstrapNode(ctx context.Context, c *cfg) {
- if c.IsMaintenance() {
- c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
- return
+func bootstrapNode(c *cfg) {
+ if c.needBootstrap() {
+ if c.IsMaintenance() {
+ c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
+ return
+ }
+ err := c.bootstrap()
+ fatalOnErrDetails("bootstrap error", err)
}
- err := c.bootstrap(ctx)
- fatalOnErrDetails("bootstrap error", err)
}
func addNetmapNotificationHandler(c *cfg, sTyp string, h event.Handler) {
@@ -239,47 +247,46 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser
// initNetmapState inits current Network map state.
// Must be called after Morph components initialization.
-func initNetmapState(ctx context.Context, c *cfg) {
- epoch, err := c.cfgNetmap.wrapper.Epoch(ctx)
+func initNetmapState(c *cfg) {
+ epoch, err := c.cfgNetmap.wrapper.Epoch()
fatalOnErrDetails("could not initialize current epoch number", err)
var ni *netmapSDK.NodeInfo
- ni, err = c.netmapInitLocalNodeState(ctx, epoch)
+ ni, err = c.netmapInitLocalNodeState(epoch)
fatalOnErrDetails("could not init network state", err)
stateWord := nodeState(ni)
- c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState,
+ c.log.Info(logs.FrostFSNodeInitialNetworkState,
zap.Uint64("epoch", epoch),
zap.String("state", stateWord),
)
- if ni != nil && ni.Status().IsMaintenance() {
+ if ni != nil && ni.IsMaintenance() {
c.isMaintenance.Store(true)
}
c.cfgNetmap.state.setCurrentEpoch(epoch)
- c.setContractNodeInfo(ni)
+ c.cfgNetmap.startEpoch = epoch
+ c.handleLocalNodeInfo(ni)
}
func nodeState(ni *netmapSDK.NodeInfo) string {
if ni != nil {
- switch ni.Status() {
- case netmapSDK.Online:
+ switch {
+ case ni.IsOnline():
return "online"
- case netmapSDK.Offline:
+ case ni.IsOffline():
return "offline"
- case netmapSDK.Maintenance:
+ case ni.IsMaintenance():
return "maintenance"
- case netmapSDK.UnspecifiedState:
- return "undefined"
}
}
return "undefined"
}
-func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
- nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx)
+func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
+ nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
if err != nil {
return nil, err
}
@@ -292,7 +299,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
}
}
- node, err := c.netmapLocalNodeState(ctx, epoch)
+ node, err := c.netmapLocalNodeState(epoch)
if err != nil {
return nil, err
}
@@ -306,16 +313,16 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
if nmState != candidateState {
// This happens when the node was switched to maintenance without epoch tick.
// We expect it to continue staying in maintenance.
- c.log.Info(ctx, logs.CandidateStatusPriority,
+ c.log.Info(logs.CandidateStatusPriority,
zap.String("netmap", nmState),
zap.String("candidate", candidateState))
}
return candidate, nil
}
-func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
+func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
// calculate current network state
- nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch)
+ nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch)
if err != nil {
return nil, err
}
@@ -350,42 +357,39 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) {
)
}
-func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error {
+var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode")
+
+func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {
switch st {
default:
return fmt.Errorf("unsupported status %v", st)
case control.NetmapStatus_MAINTENANCE:
- return c.setMaintenanceStatus(ctx, false)
+ return c.setMaintenanceStatus(false)
case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE:
}
- c.stopMaintenance(ctx)
+ c.stopMaintenance()
+
+ if !c.needBootstrap() {
+ return errRelayBootstrap
+ }
if st == control.NetmapStatus_ONLINE {
c.cfgNetmap.reBoostrapTurnedOff.Store(false)
- return bootstrapOnline(ctx, c)
+ return bootstrapOnline(c)
}
c.cfgNetmap.reBoostrapTurnedOff.Store(true)
- return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
+ return c.updateNetMapState(func(*nmClient.UpdatePeerPrm) {})
}
-func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) {
- epoch, err := c.netMapSource.Epoch(ctx)
- if err != nil {
- return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err)
- }
- st := c.NetmapStatus()
- return st, epoch, nil
+func (c *cfg) ForceMaintenance() error {
+ return c.setMaintenanceStatus(true)
}
-func (c *cfg) ForceMaintenance(ctx context.Context) error {
- return c.setMaintenanceStatus(ctx, true)
-}
-
-func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
- netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx)
+func (c *cfg) setMaintenanceStatus(force bool) error {
+ netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
if err != nil {
err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
} else if !netSettings.MaintenanceModeAllowed {
@@ -393,10 +397,10 @@ func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
}
if err == nil || force {
- c.startMaintenance(ctx)
+ c.startMaintenance()
if err == nil {
- err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance)
+ err = c.updateNetMapState((*nmClient.UpdatePeerPrm).SetMaintenance)
}
if err != nil {
@@ -409,16 +413,12 @@ func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
// calls UpdatePeerState operation of Netmap contract's client for the local node.
// State setter is used to specify node state to switch to.
-func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error {
+func (c *cfg) updateNetMapState(stateSetter func(*nmClient.UpdatePeerPrm)) error {
var prm nmClient.UpdatePeerPrm
prm.SetKey(c.key.PublicKey().Bytes())
stateSetter(&prm)
- res, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm)
- if err != nil {
- return err
- }
- return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
+ return c.cfgNetmap.wrapper.UpdatePeerState(prm)
}
type netInfo struct {
@@ -433,7 +433,7 @@ type netInfo struct {
msPerBlockRdr func() (int64, error)
}
-func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) {
+func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
magic, err := n.magic.MagicNumber()
if err != nil {
return nil, err
@@ -443,7 +443,7 @@ func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.Net
ni.SetCurrentEpoch(n.netState.CurrentEpoch())
ni.SetMagicNumber(magic)
- netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx)
+ netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration()
if err != nil {
return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err)
}
diff --git a/cmd/frostfs-node/netmap_source.go b/cmd/frostfs-node/netmap_source.go
deleted file mode 100644
index e6be9cdf5..000000000
--- a/cmd/frostfs-node/netmap_source.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package main
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-type rawNetmapSource struct {
- client *netmapClient.Client
-}
-
-func newRawNetmapStorage(client *netmapClient.Client) netmap.Source {
- return &rawNetmapSource{
- client: client,
- }
-}
-
-func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
- nm, err := s.client.GetNetMap(ctx, diff)
- if err != nil {
- return nil, err
- }
- candidates, err := s.client.GetCandidates(ctx)
- if err != nil {
- return nil, err
- }
- updates := getNetMapNodesToUpdate(nm, candidates)
- if len(updates) > 0 {
- mergeNetmapWithCandidates(updates, nm)
- }
- return nm, nil
-}
-
-func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
- nm, err := s.client.GetNetMapByEpoch(ctx, epoch)
- if err != nil {
- return nil, err
- }
- candidates, err := s.client.GetCandidates(ctx)
- if err != nil {
- return nil, err
- }
- updates := getNetMapNodesToUpdate(nm, candidates)
- if len(updates) > 0 {
- mergeNetmapWithCandidates(updates, nm)
- }
- return nm, nil
-}
-
-func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) {
- return s.client.Epoch(ctx)
-}
diff --git a/cmd/frostfs-node/notificator.go b/cmd/frostfs-node/notificator.go
new file mode 100644
index 000000000..8e581505f
--- /dev/null
+++ b/cmd/frostfs-node/notificator.go
@@ -0,0 +1,173 @@
+package main
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+
+ nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/notificator"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/notificator/nats"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+)
+
+type notificationSource struct {
+ e *engine.StorageEngine
+ l *logger.Logger
+ defaultTopic string
+}
+
+func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler func(topic string, addr oid.Address)) {
+ log := n.l.With(zap.Uint64("epoch", epoch))
+
+ listRes, err := n.e.ListContainers(ctx, engine.ListContainersPrm{})
+ if err != nil {
+ log.Error(logs.FrostFSNodeNotificatorCouldNotListContainers, zap.Error(err))
+ return
+ }
+
+ filters := objectSDK.NewSearchFilters()
+ filters.AddNotificationEpochFilter(epoch)
+
+ var selectPrm engine.SelectPrm
+ selectPrm.WithFilters(filters)
+
+ for _, c := range listRes.Containers() {
+ selectPrm.WithContainerID(c)
+
+ selectRes, err := n.e.Select(ctx, selectPrm)
+ if err != nil {
+ log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer,
+ zap.Stringer("cid", c),
+ zap.Error(err),
+ )
+ continue
+ }
+
+ for _, a := range selectRes.AddressList() {
+ err = n.processAddress(ctx, a, handler)
+ if err != nil {
+ log.Error(logs.FrostFSNodeNotificatorCouldNotProcessObject,
+ zap.Stringer("address", a),
+ zap.Error(err),
+ )
+ continue
+ }
+ }
+ }
+
+ log.Debug(logs.FrostFSNodeNotificatorFinishedProcessingObjectNotifications)
+}
+
+func (n *notificationSource) processAddress(
+ ctx context.Context,
+ a oid.Address,
+ h func(topic string, addr oid.Address),
+) error {
+ var prm engine.HeadPrm
+ prm.WithAddress(a)
+
+ res, err := n.e.Head(ctx, prm)
+ if err != nil {
+ return err
+ }
+
+ ni, err := res.Header().NotificationInfo()
+ if err != nil {
+ return fmt.Errorf("could not retrieve notification topic from object: %w", err)
+ }
+
+ topic := ni.Topic()
+
+ if topic == "" {
+ topic = n.defaultTopic
+ }
+
+ h(topic, a)
+
+ return nil
+}
+
+type notificationWriter struct {
+ l *logger.Logger
+ w *nats.Writer
+}
+
+func (n notificationWriter) Notify(topic string, address oid.Address) {
+ if err := n.w.Notify(topic, address); err != nil {
+ n.l.Warn(logs.FrostFSNodeCouldNotWriteObjectNotification,
+ zap.Stringer("address", address),
+ zap.String("topic", topic),
+ zap.Error(err),
+ )
+ }
+}
+
+func initNotifications(ctx context.Context, c *cfg) {
+ if nodeconfig.Notification(c.appCfg).Enabled() {
+ topic := nodeconfig.Notification(c.appCfg).DefaultTopic()
+ pubKey := hex.EncodeToString(c.cfgNodeInfo.localInfo.PublicKey())
+
+ if topic == "" {
+ topic = pubKey
+ }
+
+ natsSvc := nats.New(
+ nats.WithConnectionName("FrostFS Storage Node: "+pubKey), // connection name is used in the server side logs
+ nats.WithTimeout(nodeconfig.Notification(c.appCfg).Timeout()),
+ nats.WithClientCert(
+ nodeconfig.Notification(c.appCfg).CertPath(),
+ nodeconfig.Notification(c.appCfg).KeyPath(),
+ ),
+ nats.WithRootCA(nodeconfig.Notification(c.appCfg).CAPath()),
+ nats.WithLogger(c.log),
+ )
+
+ c.cfgNotifications = cfgNotifications{
+ enabled: true,
+ nw: notificationWriter{
+ l: c.log,
+ w: natsSvc,
+ },
+ defaultTopic: topic,
+ }
+
+ n := notificator.New(new(notificator.Prm).
+ SetLogger(c.log).
+ SetNotificationSource(
+ ¬ificationSource{
+ e: c.cfgObject.cfgLocalStorage.localStorage,
+ l: c.log,
+ defaultTopic: topic,
+ }).
+ SetWriter(c.cfgNotifications.nw),
+ )
+
+ addNewEpochAsyncNotificationHandler(c, func(e event.Event) {
+ ev := e.(netmap.NewEpoch)
+
+ n.ProcessEpoch(ctx, ev.EpochNumber())
+ })
+ }
+}
+
+func connectNats(ctx context.Context, c *cfg) {
+ if !c.cfgNotifications.enabled {
+ return
+ }
+
+ endpoint := nodeconfig.Notification(c.appCfg).Endpoint()
+ err := c.cfgNotifications.nw.w.Connect(ctx, endpoint)
+ if err != nil {
+ panic(fmt.Sprintf("could not connect to a nats endpoint %s: %v", endpoint, err))
+ } else {
+ c.log.Info(logs.NatsConnectedToEndpoint, zap.String("endpoint", endpoint))
+ }
+}
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index c33c02b3f..34847e36f 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -2,27 +2,32 @@ package main
import (
"context"
+ "errors"
"fmt"
- "net"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics"
policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
- objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl"
+ v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
deletesvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete/v2"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
getsvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get/v2"
- patchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/patch"
+ headsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/head"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
putsvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put/v2"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
@@ -31,15 +36,13 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"go.uber.org/zap"
- "google.golang.org/grpc"
)
type objectSvc struct {
@@ -50,29 +53,23 @@ type objectSvc struct {
get *getsvcV2.Service
delete *deletesvcV2.Service
-
- patch *patchsvc.Service
}
-func (c *cfg) MaxObjectSize(ctx context.Context) uint64 {
- sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx)
+func (c *cfg) MaxObjectSize() uint64 {
+ sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
- zap.Error(err),
+ c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
+ zap.String("error", err.Error()),
)
}
return sz
}
-func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) {
+func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
return s.put.Put()
}
-func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) {
- return s.patch.Patch()
-}
-
func (s *objectSvc) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
return s.put.PutSingle(ctx, req)
}
@@ -103,15 +100,16 @@ func (s *objectSvc) GetRangeHash(ctx context.Context, req *object.GetRangeHashRe
type delNetInfo struct {
netmap.State
+ tsLifetime uint64
cfg *cfg
}
func (i *delNetInfo) TombstoneLifetime() (uint64, error) {
- return i.cfg.cfgObject.tombstoneLifetime.Load(), nil
+ return i.tsLifetime, nil
}
-// LocalNodeID returns node owner ID calculated from configured private key.
+// returns node owner ID calculated from configured private key.
//
// Implements method needed for Object.Delete service.
func (i *delNetInfo) LocalNodeID() user.ID {
@@ -122,8 +120,8 @@ type innerRingFetcherWithNotary struct {
sidechain *morphClient.Client
}
-func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) {
- keys, err := fn.sidechain.NeoFSAlphabetList(ctx)
+func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
+ keys, err := fn.sidechain.NeoFSAlphabetList()
if err != nil {
return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err)
}
@@ -136,6 +134,24 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]by
return result, nil
}
+type innerRingFetcherWithoutNotary struct {
+ nm *nmClient.Client
+}
+
+func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
+ keys, err := f.nm.GetInnerRingList()
+ if err != nil {
+ return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err)
+ }
+
+ result := make([][]byte, 0, len(keys))
+ for i := range keys {
+ result = append(result, keys[i].Bytes())
+ }
+
+ return result, nil
+}
+
func initObjectService(c *cfg) {
keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)
@@ -150,32 +166,29 @@ func initObjectService(c *cfg) {
sPutV2 := createPutSvcV2(sPut, keyStorage)
- sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource)
+ sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache)
sSearchV2 := createSearchSvcV2(sSearch, keyStorage)
- sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource,
- c.ObjectCfg.priorityMetrics)
+ sGet := createGetService(c, keyStorage, traverseGen, c.clientCache)
*c.cfgObject.getSvc = *sGet // need smth better
- sGetV2 := createGetServiceV2(c, sGet, keyStorage)
+ sGetV2 := createGetServiceV2(sGet, keyStorage)
sDelete := createDeleteService(c, keyStorage, sGet, sSearch, sPut)
sDeleteV2 := createDeleteServiceV2(sDelete)
- sPatch := createPatchSvc(sGet, sPut)
-
// build service pipeline
- // grpc | audit | qos | | signature | response | acl | ape | split
+ // grpc | | signature | response | acl | split
- splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
+ splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2)
- apeSvc := createAPEService(c, &irFetcher, splitSvc)
+ aclSvc := createACLServiceV2(c, splitSvc, &irFetcher)
var commonSvc objectService.Common
- commonSvc.Init(&c.internals, apeSvc)
+ commonSvc.Init(&c.internals, aclSvc)
respSvc := objectService.NewResponseService(
&commonSvc,
@@ -187,23 +200,18 @@ func initObjectService(c *cfg) {
respSvc,
)
- c.metricsSvc = objectService.NewMetricCollector(
+ c.shared.metricsSvc = objectService.NewMetricCollector(
signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
- qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService)
- auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
- server := objectTransportGRPC.New(auditSvc)
+ server := objectTransportGRPC.New(c.shared.metricsSvc)
- c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
- objectGRPC.RegisterObjectServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(objectGRPC.ObjectService_ServiceDesc), server)
- })
+ for _, srv := range c.cfgGRPC.servers {
+ objectGRPC.RegisterObjectServiceServer(srv, server)
+ }
}
func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
if policerconfig.UnsafeDisable(c.appCfg) {
- c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled)
+ c.log.Warn(logs.FrostFSNodePolicerIsDisabled)
return
}
@@ -214,12 +222,14 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
prm.MarkAsGarbage(addr)
prm.WithForceRemoval()
- return ls.Inhume(ctx, prm)
+ _, err := ls.Inhume(ctx, prm)
+ return err
}
- remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)
+ remoteHeader := headsvc.NewRemoteHeader(keyStorage, clientConstructor)
+
pol := policer.New(
- policer.WithLogger(c.log.WithTag(logger.TagPolicer)),
+ policer.WithLogger(c.log),
policer.WithKeySpaceIterator(&keySpaceIterator{ng: ls}),
policer.WithBuryFunc(buryFn),
policer.WithContainerSource(c.cfgObject.cnrSource),
@@ -227,33 +237,11 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
placement.NewNetworkMapSourceBuilder(c.netMapSource),
),
policer.WithRemoteObjectHeaderFunc(
- func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
- prm := new(objectService.RemoteRequestPrm).WithNodeInfo(ni).WithObjectAddress(a).WithRaw(raw)
- return remoteReader.Head(ctx, prm)
+ func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address) (*objectSDK.Object, error) {
+ prm := new(headsvc.RemoteHeadPrm).WithNodeInfo(ni).WithObjectAddress(a)
+ return remoteHeader.Head(ctx, prm)
},
),
- policer.WithLocalObjectHeaderFunc(func(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
- var prm engine.HeadPrm
- prm.WithAddress(a)
- res, err := c.cfgObject.cfgLocalStorage.localStorage.Head(ctx, prm)
- if err != nil {
- return nil, err
- }
- return res.Header(), nil
- }),
- policer.WithRemoteObjectGetFunc(func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address) (*objectSDK.Object, error) {
- prm := new(objectService.RemoteRequestPrm).WithNodeInfo(ni).WithObjectAddress(a)
- return remoteReader.Get(ctx, prm)
- }),
- policer.WithLocalObjectGetFunc(func(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
- var prm engine.GetPrm
- prm.WithAddress(a)
- res, err := c.cfgObject.cfgLocalStorage.localStorage.Get(ctx, prm)
- if err != nil {
- return nil, err
- }
- return res.Object(), nil
- }),
policer.WithNetmapKeys(c),
policer.WithHeadTimeout(
policerconfig.HeadTimeout(c.appCfg),
@@ -263,15 +251,15 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
var inhumePrm engine.InhumePrm
inhumePrm.MarkAsGarbage(addr)
- if err := ls.Inhume(ctx, inhumePrm); err != nil {
- c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
- zap.Error(err),
+ _, err := ls.Inhume(ctx, inhumePrm)
+ if err != nil {
+ c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
+ zap.String("error", err.Error()),
)
}
}),
+ policer.WithMaxCapacity(c.cfgObject.pool.replicatorPoolSize),
policer.WithPool(c.cfgObject.pool.replication),
- policer.WithMetrics(c.metricsCollector.PolicerMetrics()),
- policer.WithKeyStorage(keyStorage),
)
c.workers = append(c.workers, worker{
@@ -281,9 +269,14 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
})
}
-func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher {
- return &innerRingFetcherWithNotary{
- sidechain: c.cfgMorph.client,
+func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
+ if c.cfgMorph.client.ProbeNotary() {
+ return &innerRingFetcherWithNotary{
+ sidechain: c.cfgMorph.client,
+ }
+ }
+ return &innerRingFetcherWithoutNotary{
+ nm: c.cfgNetmap.wrapper,
}
}
@@ -291,16 +284,13 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa
ls := c.cfgObject.cfgLocalStorage.localStorage
return replicator.New(
- replicator.WithLogger(c.log.WithTag(logger.TagReplicator)),
+ replicator.WithLogger(c.log),
replicator.WithPutTimeout(
replicatorconfig.PutTimeout(c.appCfg),
),
replicator.WithLocalStorage(ls),
replicator.WithRemoteSender(
- objectwriter.NewRemoteSender(keyStorage, cache),
- ),
- replicator.WithRemoteGetter(
- getsvc.NewRemoteGetter(c.clientCache, c.netMapSource, keyStorage),
+ putsvc.NewRemoteSender(keyStorage, cache),
),
replicator.WithMetrics(c.metricsCollector.Replicator()),
)
@@ -309,10 +299,19 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa
func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetcher) *putsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
- var os objectwriter.ObjectStorage = engineWithoutNotifications{
+ var os putsvc.ObjectStorage = engineWithoutNotifications{
engine: ls,
}
+ if c.cfgNotifications.enabled {
+ os = engineWithNotifications{
+ base: os,
+ nw: c.cfgNotifications.nw,
+ ns: c.cfgNetmap.state,
+ defaultTopic: c.cfgNotifications.defaultTopic,
+ }
+ }
+
return putsvc.NewService(
keyStorage,
c.putClientCache,
@@ -323,8 +322,9 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
c,
c.cfgNetmap.state,
irFetcher,
- objectwriter.WithLogger(c.log),
- objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
+ putsvc.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
+ putsvc.WithLogger(c.log),
+ putsvc.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
)
}
@@ -332,11 +332,7 @@ func createPutSvcV2(sPut *putsvc.Service, keyStorage *util.KeyStorage) *putsvcV2
return putsvcV2.NewService(sPut, keyStorage)
}
-func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service) *patchsvc.Service {
- return patchsvc.NewService(sPut.Config, sGet)
-}
-
-func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache, containerSource containercore.Source) *searchsvc.Service {
+func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
return searchsvc.New(
@@ -347,8 +343,7 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav
),
c.netMapSource,
keyStorage,
- containerSource,
- searchsvc.WithLogger(c.log.WithTag(logger.TagSearchSvc)),
+ searchsvc.WithLogger(c.log),
)
}
@@ -357,10 +352,7 @@ func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage)
}
func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator,
- coreConstructor *cache.ClientCache,
- containerSource containercore.Source,
- priorityMetrics []placement.Metric,
-) *getsvc.Service {
+ coreConstructor *cache.ClientCache) *getsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
return getsvc.New(
@@ -369,40 +361,32 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra
ls,
traverseGen.WithTraverseOptions(
placement.SuccessAfter(1),
- placement.WithPriorityMetrics(priorityMetrics),
- placement.WithNodeState(c),
),
coreConstructor,
- containerSource,
- getsvc.WithLogger(c.log.WithTag(logger.TagGetSvc)))
+ getsvc.WithLogger(c.log))
}
-func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service {
+func createGetServiceV2(sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service {
return getsvcV2.NewService(
- sGet,
- keyStorage,
- c.clientCache,
- c.netMapSource,
- c,
- c.cfgObject.cnrSource,
- getsvcV2.WithLogger(c.log.WithTag(logger.TagGetSvc)),
+ getsvcV2.WithInternalService(sGet),
+ getsvcV2.WithKeyStorage(keyStorage),
)
}
func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Service,
- sSearch *searchsvc.Service, sPut *putsvc.Service,
-) *deletesvc.Service {
+ sSearch *searchsvc.Service, sPut *putsvc.Service) *deletesvc.Service {
return deletesvc.New(
sGet,
sSearch,
sPut,
&delNetInfo{
- State: c.cfgNetmap.state,
+ State: c.cfgNetmap.state,
+ tsLifetime: c.cfgObject.tombstoneLifetime,
cfg: c,
},
keyStorage,
- deletesvc.WithLogger(c.log.WithTag(logger.TagDeleteSvc)),
+ deletesvc.WithLogger(c.log),
)
}
@@ -411,8 +395,7 @@ func createDeleteServiceV2(sDelete *deletesvc.Service) *deletesvcV2.Service {
}
func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Service,
- sSearchV2 *searchsvcV2.Service, sDeleteV2 *deletesvcV2.Service, sPatch *patchsvc.Service,
-) *objectService.TransportSplitter {
+ sSearchV2 *searchsvcV2.Service, sDeleteV2 *deletesvcV2.Service) *objectService.TransportSplitter {
return objectService.NewTransportSplitter(
c.cfgGRPC.maxChunkSize,
c.cfgGRPC.maxAddrAmount,
@@ -421,28 +404,91 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
search: sSearchV2,
get: sGetV2,
delete: sDeleteV2,
- patch: sPatch,
},
)
}
-func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
- return objectAPE.NewService(
- objectAPE.NewChecker(
- c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
- c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
- objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc),
- c.frostfsidClient,
- c.netMapSource,
- c.cfgNetmap.state,
- c.cfgObject.cnrSource,
- c.binPublicKey,
- ),
- objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
+func createACLServiceV2(c *cfg, splitSvc *objectService.TransportSplitter, irFetcher *cachedIRFetcher) v2.Service {
+ ls := c.cfgObject.cfgLocalStorage.localStorage
+
+ return v2.New(
splitSvc,
+ c.netMapSource,
+ irFetcher,
+ acl.NewChecker(
+ c.cfgNetmap.state,
+ c.cfgObject.eaclSource,
+ eaclSDK.NewValidator(),
+ ls),
+ c.cfgObject.cnrSource,
+ v2.WithLogger(c.log),
)
}
+type morphEACLFetcher struct {
+ w *cntClient.Client
+}
+
+func (s *morphEACLFetcher) GetEACL(cnr cid.ID) (*containercore.EACL, error) {
+ eaclInfo, err := s.w.GetEACL(cnr)
+ if err != nil {
+ return nil, err
+ }
+
+ binTable, err := eaclInfo.Value.Marshal()
+ if err != nil {
+ return nil, fmt.Errorf("marshal eACL table: %w", err)
+ }
+
+ if !eaclInfo.Signature.Verify(binTable) {
+ // TODO(@cthulhu-rider): #468 use "const" error
+ return nil, errors.New("invalid signature of the eACL table")
+ }
+
+ return eaclInfo, nil
+}
+
+type engineWithNotifications struct {
+ base putsvc.ObjectStorage
+ nw notificationWriter
+ ns netmap.State
+
+ defaultTopic string
+}
+
+func (e engineWithNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) {
+ return e.base.IsLocked(ctx, address)
+}
+
+func (e engineWithNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
+ return e.base.Delete(ctx, tombstone, toDelete)
+}
+
+func (e engineWithNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
+ return e.base.Lock(ctx, locker, toLock)
+}
+
+func (e engineWithNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
+ if err := e.base.Put(ctx, o); err != nil {
+ return err
+ }
+
+ ni, err := o.NotificationInfo()
+ if err == nil {
+ if epoch := ni.Epoch(); epoch == 0 || epoch == e.ns.CurrentEpoch() {
+ topic := ni.Topic()
+
+ if topic == "" {
+ topic = e.defaultTopic
+ }
+
+ e.nw.Notify(topic, objectCore.AddressOf(o))
+ }
+ }
+
+ return nil
+}
+
type engineWithoutNotifications struct {
engine *engine.StorageEngine
}
@@ -462,13 +508,14 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
prm.WithTarget(tombstone, addrs...)
- return e.engine.Inhume(ctx, prm)
+ _, err := e.engine.Inhume(ctx, prm)
+ return err
}
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
}
-func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexedContainer bool) error {
- return engine.Put(ctx, e.engine, o, indexedContainer)
+func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
+ return engine.Put(ctx, e.engine, o)
}
diff --git a/cmd/frostfs-node/policy_engine.go b/cmd/frostfs-node/policy_engine.go
deleted file mode 100644
index 55f76cc76..000000000
--- a/cmd/frostfs-node/policy_engine.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package main
-
-import (
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "github.com/google/uuid"
- "github.com/hashicorp/golang-lru/v2/expirable"
- "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-type accessPolicyEngine struct {
- localOverrideDatabase chainbase.LocalOverrideDatabase
-
- morphChainStorage engine.MorphRuleChainStorageReader
-}
-
-var _ engine.MorphRuleChainStorageReader = (*morphAPEChainCache)(nil)
-
-type morphAPEChainCacheKey struct {
- // nolint:unused
- name chain.Name
- // nolint:unused
- target engine.Target
-}
-
-type morphAPEChainCache struct {
- source engine.MorphRuleChainStorageReader
- cache *expirable.LRU[morphAPEChainCacheKey, []*chain.Chain]
-}
-
-func newMorphCache(source engine.MorphRuleChainStorageReader, size int, ttl time.Duration) engine.MorphRuleChainStorageReader {
- return &morphAPEChainCache{
- source: source,
- cache: expirable.NewLRU(size, func(morphAPEChainCacheKey, []*chain.Chain) {}, ttl),
- }
-}
-
-func (m *morphAPEChainCache) GetAdmin() (util.Uint160, error) {
- return m.source.GetAdmin()
-}
-
-func (m *morphAPEChainCache) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) {
- key := morphAPEChainCacheKey{name: name, target: target}
- result, found := m.cache.Get(key)
- if found {
- return result, nil
- }
-
- result, err := m.source.ListMorphRuleChains(name, target)
- if err != nil {
- return nil, err
- }
-
- m.cache.Add(key, result)
- return result, nil
-}
-
-func (m *morphAPEChainCache) ListTargetsIterator(targetType engine.TargetType) (uuid.UUID, result.Iterator, error) {
- return m.source.ListTargetsIterator(targetType)
-}
-
-func newAccessPolicyEngine(
- morphChainStorage engine.MorphRuleChainStorageReader,
- localOverrideDatabase chainbase.LocalOverrideDatabase,
-) *accessPolicyEngine {
- return &accessPolicyEngine{
- morphChainStorage: morphChainStorage,
-
- localOverrideDatabase: localOverrideDatabase,
- }
-}
-
-func (a *accessPolicyEngine) LocalStorage() engine.LocalOverrideStorage {
- return a.localOverrideDatabase
-}
-
-func (a *accessPolicyEngine) MorphRuleChainStorage() engine.MorphRuleChainStorageReader {
- return a.morphChainStorage
-}
-
-func (a *accessPolicyEngine) LocalOverrideDatabaseCore() chainbase.DatabaseCore {
- return a.localOverrideDatabase
-}
diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go
index e4da8119f..dcd320146 100644
--- a/cmd/frostfs-node/pprof.go
+++ b/cmd/frostfs-node/pprof.go
@@ -1,50 +1,49 @@
package main
import (
- "context"
"runtime"
profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler"
httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
)
-func initProfilerService(ctx context.Context, c *cfg) {
+func initProfilerService(c *cfg) {
tuneProfilers(c)
pprof, _ := pprofComponent(c)
- pprof.init(ctx, c)
+ pprof.init(c)
}
func pprofComponent(c *cfg) (*httpComponent, bool) {
var updated bool
// check if it has been inited before
- if c.pprof == nil {
- c.pprof = new(httpComponent)
- c.pprof.cfg = c
- c.pprof.name = "pprof"
- c.pprof.handler = httputil.Handler()
- c.pprof.preReload = tuneProfilers
+ if c.dynamicConfiguration.pprof == nil {
+ c.dynamicConfiguration.pprof = new(httpComponent)
+ c.dynamicConfiguration.pprof.cfg = c
+ c.dynamicConfiguration.pprof.name = "pprof"
+ c.dynamicConfiguration.pprof.handler = httputil.Handler()
+ c.dynamicConfiguration.pprof.preReload = tuneProfilers
updated = true
}
// (re)init read configuration
enabled := profilerconfig.Enabled(c.appCfg)
- if enabled != c.pprof.enabled {
- c.pprof.enabled = enabled
+ if enabled != c.dynamicConfiguration.pprof.enabled {
+ c.dynamicConfiguration.pprof.enabled = enabled
updated = true
}
address := profilerconfig.Address(c.appCfg)
- if address != c.pprof.address {
- c.pprof.address = address
+ if address != c.dynamicConfiguration.pprof.address {
+ c.dynamicConfiguration.pprof.address = address
updated = true
}
dur := profilerconfig.ShutdownTimeout(c.appCfg)
- if dur != c.pprof.shutdownDur {
- c.pprof.shutdownDur = dur
+ if dur != c.dynamicConfiguration.pprof.shutdownDur {
+ c.dynamicConfiguration.pprof.shutdownDur = dur
updated = true
}
- return c.pprof, updated
+ return c.dynamicConfiguration.pprof, updated
}
func tuneProfilers(c *cfg) {
diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go
deleted file mode 100644
index 6394b668b..000000000
--- a/cmd/frostfs-node/qos.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package main
-
-import (
- "bytes"
- "context"
-
- qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "go.uber.org/zap"
-)
-
-type cfgQoSService struct {
- netmapSource netmap.Source
- logger *logger.Logger
- allowedCriticalPubs [][]byte
- allowedInternalPubs [][]byte
-}
-
-func initQoSService(c *cfg) {
- criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg)
- internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg)
- rawCriticalPubs := make([][]byte, 0, len(criticalPubs))
- rawInternalPubs := make([][]byte, 0, len(internalPubs))
- for i := range criticalPubs {
- rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes())
- }
- for i := range internalPubs {
- rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes())
- }
-
- c.cfgQoSService = cfgQoSService{
- netmapSource: c.netMapSource,
- logger: c.log,
- allowedCriticalPubs: rawCriticalPubs,
- allowedInternalPubs: rawInternalPubs,
- }
-}
-
-func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
- rawTag, defined := qosTagging.IOTagFromContext(ctx)
- if !defined {
- if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String())
- }
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- }
- ioTag, err := qos.FromRawString(rawTag)
- if err != nil {
- s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- }
-
- switch ioTag {
- case qos.IOTagClient:
- return ctx
- case qos.IOTagCritical:
- for _, pk := range s.allowedCriticalPubs {
- if bytes.Equal(pk, requestSignPublicKey) {
- return ctx
- }
- }
- nm, err := s.netmapSource.GetNetMap(ctx, 0)
- if err != nil {
- s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- }
- for _, node := range nm.Nodes() {
- if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
- return ctx
- }
- }
- s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- case qos.IOTagInternal:
- if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
- return ctx
- }
- s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- default:
- s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- }
-}
-
-func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool {
- for _, pk := range s.allowedInternalPubs {
- if bytes.Equal(pk, publicKey) {
- return true
- }
- }
- nm, err := s.netmapSource.GetNetMap(ctx, 0)
- if err != nil {
- s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
- return false
- }
- for _, node := range nm.Nodes() {
- if bytes.Equal(node.PublicKey(), publicKey) {
- return true
- }
- }
-
- return false
-}
diff --git a/cmd/frostfs-node/qos_test.go b/cmd/frostfs-node/qos_test.go
deleted file mode 100644
index 971f9eebf..000000000
--- a/cmd/frostfs-node/qos_test.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package main
-
-import (
- "context"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-func TestQoSService_Client(t *testing.T) {
- t.Parallel()
- s, pk := testQoSServicePrepare(t)
- t.Run("IO tag client defined", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Request)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) {
- ctx := s.AdjustIncomingTag(context.Background(), pk.Request)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) {
- ctx := s.AdjustIncomingTag(context.Background(), pk.Critical)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
- ctx = s.AdjustIncomingTag(ctx, pk.Request)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
- ctx = s.AdjustIncomingTag(ctx, pk.Internal)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
- ctx = s.AdjustIncomingTag(ctx, pk.Critical)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Request)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Critical)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Request)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Internal)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
-}
-
-func TestQoSService_Internal(t *testing.T) {
- t.Parallel()
- s, pk := testQoSServicePrepare(t)
- t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagInternal.String(), tag)
- })
- t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Internal)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagInternal.String(), tag)
- })
- t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
- ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagInternal.String(), tag)
- })
- t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) {
- ctx := s.AdjustIncomingTag(context.Background(), pk.Internal)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagInternal.String(), tag)
- })
-}
-
-func TestQoSService_Critical(t *testing.T) {
- t.Parallel()
- s, pk := testQoSServicePrepare(t)
- t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagCritical.String(), tag)
- })
- t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
- ctx = s.AdjustIncomingTag(ctx, pk.Critical)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagCritical.String(), tag)
- })
-}
-
-func TestQoSService_NetmapGetError(t *testing.T) {
- t.Parallel()
- s, pk := testQoSServicePrepare(t)
- s.netmapSource = &utilTesting.TestNetmapSource{}
- t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
- ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
- t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
- ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
- ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
- tag, ok := tagging.IOTagFromContext(ctx)
- require.True(t, ok)
- require.Equal(t, qos.IOTagClient.String(), tag)
- })
-}
-
-func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) {
- nmSigner, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- reqSigner, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- allowedCritSigner, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- allowedIntSigner, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- var node netmap.NodeInfo
- node.SetPublicKey(nmSigner.PublicKey().Bytes())
- nm := &netmap.NetMap{}
- nm.SetEpoch(100)
- nm.SetNodes([]netmap.NodeInfo{node})
-
- return &cfgQoSService{
- logger: test.NewLogger(t),
- netmapSource: &utilTesting.TestNetmapSource{
- Netmaps: map[uint64]*netmap.NetMap{
- 100: nm,
- },
- CurrentEpoch: 100,
- },
- allowedCriticalPubs: [][]byte{
- allowedCritSigner.PublicKey().Bytes(),
- },
- allowedInternalPubs: [][]byte{
- allowedIntSigner.PublicKey().Bytes(),
- },
- },
- &testQoSServicePublicKeys{
- NetmapNode: nmSigner.PublicKey().Bytes(),
- Request: reqSigner.PublicKey().Bytes(),
- Internal: allowedIntSigner.PublicKey().Bytes(),
- Critical: allowedCritSigner.PublicKey().Bytes(),
- }
-}
-
-type testQoSServicePublicKeys struct {
- NetmapNode []byte
- Request []byte
- Internal []byte
- Critical []byte
-}
diff --git a/cmd/frostfs-node/runtime.go b/cmd/frostfs-node/runtime.go
index f6d398574..d858ba490 100644
--- a/cmd/frostfs-node/runtime.go
+++ b/cmd/frostfs-node/runtime.go
@@ -1,7 +1,6 @@
package main
import (
- "context"
"os"
"runtime/debug"
@@ -10,17 +9,17 @@ import (
"go.uber.org/zap"
)
-func setRuntimeParameters(ctx context.Context, c *cfg) {
+func setRuntimeParameters(c *cfg) {
if len(os.Getenv("GOMEMLIMIT")) != 0 {
// default limit < yaml limit < app env limit < GOMEMLIMIT
- c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
+ c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
return
}
memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
previous := debug.SetMemoryLimit(memLimitBytes)
if memLimitBytes != previous {
- c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated,
+ c.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
zap.Int64("new_value", memLimitBytes),
zap.Int64("old_value", previous))
}
diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go
index fbfe3f5e6..f9c1811a1 100644
--- a/cmd/frostfs-node/session.go
+++ b/cmd/frostfs-node/session.go
@@ -3,9 +3,10 @@ package main
import (
"context"
"fmt"
- "net"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -14,11 +15,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "google.golang.org/grpc"
)
type sessionStorage interface {
@@ -49,21 +46,18 @@ func initSessionService(c *cfg) {
_ = c.privateTokenStore.Close()
})
- addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
+ addNewEpochNotificationHandler(c, func(ev event.Event) {
c.privateTokenStore.RemoveOld(ev.(netmap.NewEpoch).EpochNumber())
})
server := sessionTransportGRPC.New(
sessionSvc.NewSignService(
&c.key.PrivateKey,
- sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log.WithTag(logger.TagSessionSvc)),
+ sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log),
),
)
- c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
- sessionGRPC.RegisterSessionServiceServer(s, server)
-
- // TODO(@aarifullin): #1487 remove the dual service support.
- s.RegisterService(frostFSServiceDesc(sessionGRPC.SessionService_ServiceDesc), server)
- })
+ for _, srv := range c.cfgGRPC.servers {
+ sessionGRPC.RegisterSessionServiceServer(srv, server)
+ }
}
diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go
index 65f5aec15..08dc049da 100644
--- a/cmd/frostfs-node/tracing.go
+++ b/cmd/frostfs-node/tracing.go
@@ -11,25 +11,21 @@ import (
)
func initTracing(ctx context.Context, c *cfg) {
- conf, err := tracingconfig.ToTracingConfig(c.appCfg)
+ conf := tracingconfig.ToTracingConfig(c.appCfg)
+
+ _, err := tracing.Setup(ctx, *conf)
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
- return
- }
- _, err = tracing.Setup(ctx, *conf)
- if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
- return
+ c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
}
c.closers = append(c.closers, closer{
name: "tracing",
fn: func() {
- ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), time.Second*5)
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
- err := tracing.Shutdown(ctx) // cfg context cancels before close
+ err := tracing.Shutdown(ctx) //cfg context cancels before close
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
}
},
})
diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go
index 62af45389..f7c0f2a36 100644
--- a/cmd/frostfs-node/tree.go
+++ b/cmd/frostfs-node/tree.go
@@ -3,7 +3,6 @@ package main
import (
"context"
"errors"
- "net"
"time"
treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree"
@@ -14,10 +13,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
- "google.golang.org/grpc"
)
type cnrSource struct {
@@ -30,61 +27,55 @@ type cnrSource struct {
cli *containerClient.Client
}
-func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
- return c.src.Get(ctx, id)
+func (c cnrSource) Get(id cid.ID) (*container.Container, error) {
+ return c.src.Get(id)
}
-func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) {
- return c.src.DeletionInfo(ctx, cid)
+func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) {
+ return c.src.DeletionInfo(cid)
}
-func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) {
- return c.cli.ContainersOf(ctx, nil)
+func (c cnrSource) List() ([]cid.ID, error) {
+ return c.cli.ContainersOf(nil)
}
func initTreeService(c *cfg) {
treeConfig := treeconfig.Tree(c.appCfg)
if !treeConfig.Enabled() {
- c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
+ c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
return
}
c.treeService = tree.New(
tree.WithContainerSource(cnrSource{
src: c.cfgObject.cnrSource,
- cli: c.cnrClient,
+ cli: c.shared.cnrClient,
}),
- tree.WithFrostfsidSubjectProvider(c.frostfsidClient),
+ tree.WithEACLSource(c.cfgObject.eaclSource),
tree.WithNetmapSource(c.netMapSource),
tree.WithPrivateKey(&c.key.PrivateKey),
- tree.WithLogger(c.log.WithTag(logger.TagTreeSvc)),
+ tree.WithLogger(c.log),
tree.WithStorage(c.cfgObject.cfgLocalStorage.localStorage),
tree.WithContainerCacheSize(treeConfig.CacheSize()),
tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()),
tree.WithReplicationChannelCapacity(treeConfig.ReplicationChannelCapacity()),
tree.WithReplicationWorkerCount(treeConfig.ReplicationWorkerCount()),
- tree.WithSyncBatchSize(treeConfig.SyncBatchSize()),
tree.WithAuthorizedKeys(treeConfig.AuthorizedKeys()),
- tree.WithMetrics(c.metricsCollector.TreeService()),
- tree.WithAPELocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage()),
- tree.WithAPEMorphRuleStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage()),
- tree.WithNetmapState(c.cfgNetmap.state),
- tree.WithDialerSource(c.dialerSource),
- )
+ tree.WithMetrics(c.metricsCollector.TreeService()))
- c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
- tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService))
- })
+ for _, srv := range c.cfgGRPC.servers {
+ tree.RegisterTreeServiceServer(srv, c.treeService)
+ }
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
c.treeService.Start(ctx)
}))
if d := treeConfig.SyncInterval(); d == 0 {
- addNewEpochNotificationHandler(c, func(ctx context.Context, _ event.Event) {
+ addNewEpochNotificationHandler(c, func(_ event.Event) {
err := c.treeService.SynchronizeAll()
if err != nil {
- c.log.Error(ctx, logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
}
})
} else {
@@ -95,7 +86,7 @@ func initTreeService(c *cfg) {
for range tick.C {
err := c.treeService.SynchronizeAll()
if err != nil {
- c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
if errors.Is(err, tree.ErrShuttingDown) {
return
}
@@ -104,17 +95,17 @@ func initTreeService(c *cfg) {
}()
}
- subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
+ subscribeToContainerRemoval(c, func(e event.Event) {
ev := e.(containerEvent.DeleteSuccess)
// This is executed asynchronously, so we don't care about the operation taking some time.
- c.log.Debug(ctx, logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
- err := c.treeService.DropTree(ctx, ev.ID, "")
+ c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
+ err := c.treeService.DropTree(context.Background(), ev.ID, "")
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
- c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
+ c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
- zap.Error(err))
+ zap.String("error", err.Error()))
}
})
diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go
index 22d2e0aa9..e07afb2ca 100644
--- a/cmd/frostfs-node/validate.go
+++ b/cmd/frostfs-node/validate.go
@@ -25,16 +25,6 @@ func validateConfig(c *config.Config) error {
return fmt.Errorf("invalid logger level: %w", err)
}
- err = loggerPrm.SetDestination(loggerconfig.Destination(c))
- if err != nil {
- return fmt.Errorf("invalid logger destination: %w", err)
- }
-
- err = loggerPrm.SetTags(loggerconfig.Tags(c))
- if err != nil {
- return fmt.Errorf("invalid list of allowed tags: %w", err)
- }
-
// shard configuration validation
shardNum := 0
@@ -69,7 +59,7 @@ func validateConfig(c *config.Config) error {
default:
return fmt.Errorf("unexpected storage type: %s (shard %d)", blobstor[i].Type(), shardNum)
}
- if blobstor[i].Perm()&0o600 != 0o600 {
+ if blobstor[i].Perm()&0600 != 0600 {
return fmt.Errorf("invalid permissions for blobstor component: %s, "+
"expected at least rw- for the owner (shard %d)",
blobstor[i].Perm(), shardNum)
diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go
index 495365cf0..d9c0f167f 100644
--- a/cmd/frostfs-node/validate_test.go
+++ b/cmd/frostfs-node/validate_test.go
@@ -1,6 +1,7 @@
package main
import (
+ "os"
"path/filepath"
"testing"
@@ -21,4 +22,17 @@ func TestValidate(t *testing.T) {
require.NoError(t, err)
})
})
+
+ t.Run("mainnet", func(t *testing.T) {
+ os.Clearenv() // ENVs have priority over config files, so we do this in tests
+ p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
+ c := config.New(p, "", config.EnvPrefix)
+ require.NoError(t, validateConfig(c))
+ })
+ t.Run("testnet", func(t *testing.T) {
+ os.Clearenv() // ENVs have priority over config files, so we do this in tests
+ p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
+ c := config.New(p, "", config.EnvPrefix)
+ require.NoError(t, validateConfig(c))
+ })
}
diff --git a/cmd/frostfs-node/worker.go b/cmd/frostfs-node/worker.go
index c5649073b..bea235c48 100644
--- a/cmd/frostfs-node/worker.go
+++ b/cmd/frostfs-node/worker.go
@@ -30,6 +30,15 @@ func startWorker(ctx context.Context, c *cfg, wrk worker) {
}(wrk)
}
+func delWorker(c *cfg, name string) {
+ for i, worker := range c.workers {
+ if worker.name == name {
+ c.workers = append(c.workers[:i], c.workers[i+1:]...)
+ return
+ }
+ }
+}
+
func getWorker(c *cfg, name string) *worker {
for _, wrk := range c.workers {
if wrk.name == name {
diff --git a/cmd/internal/common/ape/commands.go b/cmd/internal/common/ape/commands.go
deleted file mode 100644
index e5a35ab71..000000000
--- a/cmd/internal/common/ape/commands.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package ape
-
-import (
- "encoding/hex"
- "errors"
- "fmt"
- "strconv"
- "strings"
-
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "github.com/nspcc-dev/neo-go/cli/input"
- "github.com/spf13/cobra"
-)
-
-const (
- defaultNamespace = "root"
- namespaceTarget = "namespace"
- containerTarget = "container"
- userTarget = "user"
- groupTarget = "group"
-
- Ingress = "ingress"
- S3 = "s3"
-)
-
-var mChainName = map[string]apechain.Name{
- Ingress: apechain.Ingress,
- S3: apechain.S3,
-}
-
-var (
- errSettingDefaultValueWasDeclined = errors.New("setting default value was declined")
- errUnknownTargetType = errors.New("unknown target type")
- errUnsupportedChainName = errors.New("unsupported chain name")
-)
-
-// PrintHumanReadableAPEChain print APE chain rules.
-func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) {
- cmd.Println("Chain ID: " + string(chain.ID))
- cmd.Printf(" HEX: %x\n", chain.ID)
- cmd.Println("Rules:")
- for _, rule := range chain.Rules {
- cmd.Println("\n\tStatus: " + rule.Status.String())
- cmd.Println("\tAny: " + strconv.FormatBool(rule.Any))
- cmd.Println("\tConditions:")
- for _, c := range rule.Condition {
- var ot string
- switch c.Kind {
- case apechain.KindResource:
- ot = "Resource"
- case apechain.KindRequest:
- ot = "Request"
- default:
- panic("unknown object type")
- }
- cmd.Println(fmt.Sprintf("\t\t%s %s %s %s", ot, c.Key, c.Op, c.Value))
- }
- cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted))
- for _, name := range rule.Actions.Names {
- cmd.Println("\t\t" + name)
- }
- cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted))
- for _, name := range rule.Resources.Names {
- cmd.Println("\t\t" + name)
- }
- }
-}
-
-// ParseTarget handles target parsing of an APE chain.
-func ParseTarget(cmd *cobra.Command) engine.Target {
- typ := ParseTargetType(cmd)
- name, _ := cmd.Flags().GetString(TargetNameFlag)
- switch typ {
- case engine.Namespace:
- if name == "" {
- ln, err := input.ReadLine(fmt.Sprintf("Target name is not set. Confirm to use %s namespace (n|Y)> ", defaultNamespace))
- commonCmd.ExitOnErr(cmd, "read line error: %w", err)
- ln = strings.ToLower(ln)
- if len(ln) > 0 && (ln[0] == 'n') {
- commonCmd.ExitOnErr(cmd, "read namespace error: %w", errSettingDefaultValueWasDeclined)
- }
- name = defaultNamespace
- }
- return engine.NamespaceTarget(name)
- case engine.Container:
- var cnr cid.ID
- commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
- return engine.ContainerTarget(name)
- case engine.User:
- return engine.UserTarget(name)
- case engine.Group:
- return engine.GroupTarget(name)
- default:
- commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
- }
- panic("unreachable")
-}
-
-// ParseTargetType handles target type parsing of an APE chain.
-func ParseTargetType(cmd *cobra.Command) engine.TargetType {
- typ, _ := cmd.Flags().GetString(TargetTypeFlag)
- switch typ {
- case namespaceTarget:
- return engine.Namespace
- case containerTarget:
- return engine.Container
- case userTarget:
- return engine.User
- case groupTarget:
- return engine.Group
- default:
- commonCmd.ExitOnErr(cmd, "parse target type error: %w", errUnknownTargetType)
- }
- panic("unreachable")
-}
-
-// ParseChainID handles the parsing of APE-chain identifier.
-// For some subcommands, chain ID is optional as an input parameter and should be generated by
-// the service instead.
-func ParseChainID(cmd *cobra.Command) (id apechain.ID) {
- chainID, _ := cmd.Flags().GetString(ChainIDFlag)
- id = apechain.ID(chainID)
-
- hexEncoded, _ := cmd.Flags().GetBool(ChainIDHexFlag)
- if !hexEncoded {
- return
- }
-
- chainIDRaw, err := hex.DecodeString(chainID)
- commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
- id = apechain.ID(chainIDRaw)
- return
-}
-
-// ParseChain parses an APE chain which can be provided either as a rule statement
-// or loaded from a binary/JSON file path.
-func ParseChain(cmd *cobra.Command) *apechain.Chain {
- chain := new(apechain.Chain)
- chain.ID = ParseChainID(cmd)
-
- if rules, _ := cmd.Flags().GetStringArray(RuleFlag); len(rules) > 0 {
- commonCmd.ExitOnErr(cmd, "parser error: %w", apeutil.ParseAPEChain(chain, rules))
- } else if encPath, _ := cmd.Flags().GetString(PathFlag); encPath != "" {
- commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", apeutil.ParseAPEChainBinaryOrJSON(chain, encPath))
- } else {
- commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed"))
- }
-
- cmd.Println("Parsed chain:")
- PrintHumanReadableAPEChain(cmd, chain)
-
- return chain
-}
-
-// ParseChainName parses chain name: the place in the request lifecycle where policy is applied.
-func ParseChainName(cmd *cobra.Command) apechain.Name {
- chainName, _ := cmd.Flags().GetString(ChainNameFlag)
- apeChainName, ok := mChainName[strings.ToLower(chainName)]
- if !ok {
- commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName)
- }
- return apeChainName
-}
diff --git a/cmd/internal/common/ape/flags.go b/cmd/internal/common/ape/flags.go
deleted file mode 100644
index d8b2e88a2..000000000
--- a/cmd/internal/common/ape/flags.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package ape
-
-const (
- RuleFlag = "rule"
- PathFlag = "path"
- PathFlagDesc = "Path to encoded chain in JSON or binary format"
- TargetNameFlag = "target-name"
- TargetNameFlagDesc = "Resource name in APE resource name format"
- TargetTypeFlag = "target-type"
- TargetTypeFlagDesc = "Resource type(container/namespace)"
- ChainIDFlag = "chain-id"
- ChainIDFlagDesc = "Chain id"
- ChainIDHexFlag = "chain-id-hex"
- ChainIDHexFlagDesc = "Flag to parse chain ID as hex"
- ChainNameFlag = "chain-name"
- ChainNameFlagDesc = "Chain name(ingress|s3)"
- AllFlag = "all"
-)
-
-const RuleFlagDesc = `Defines an Access Policy Engine (APE) rule in the format:
- [:status_detail] ... ... ...
-
-Status:
- - allow Permits specified actions
- - deny Prohibits specified actions
- - deny:QuotaLimitReached Denies access due to quota limits
-
-Actions:
- Object operations:
- - Object.Put, Object.Get, etc.
- - Object.* (all object operations)
- Container operations:
- - Container.Put, Container.Get, etc.
- - Container.* (all container operations)
-
-Conditions:
- ResourceCondition:
- Format: ResourceCondition:"key"=value, "key"!=value
- Reserved properties (use '\' before '$'):
- - $Object:version
- - $Object:objectID
- - $Object:containerID
- - $Object:ownerID
- - $Object:creationEpoch
- - $Object:payloadLength
- - $Object:payloadHash
- - $Object:objectType
- - $Object:homomorphicHash
-
-RequestCondition:
- Format: RequestCondition:"key"=value, "key"!=value
- Reserved properties (use '\' before '$'):
- - $Actor:publicKey
- - $Actor:role
-
- Example:
- ResourceCondition:"check_key"!="check_value" RequestCondition:"$Actor:role"=others
-
-Resources:
- For objects:
- - namespace/cid/oid (specific object)
- - namespace/cid/* (all objects in container)
- - namespace/* (all objects in namespace)
- - * (all objects)
- - /* (all objects in root namespace)
- - /cid/* (all objects in root container)
- - /cid/oid (specific object in root container)
-
- For containers:
- - namespace/cid (specific container)
- - namespace/* (all containers in namespace)
- - * (all containers)
- - /cid (root container)
- - /* (all root containers)
-
-Notes:
- - Cannot mix object and container operations in one rule
- - Default behavior is Any=false unless 'any' is specified
- - Use 'all' keyword to explicitly set Any=false`
diff --git a/cmd/internal/common/config/viper.go b/cmd/internal/common/config/viper.go
index f06d407c1..41b8831ff 100644
--- a/cmd/internal/common/config/viper.go
+++ b/cmd/internal/common/config/viper.go
@@ -1,7 +1,6 @@
package config
import (
- "errors"
"fmt"
"strings"
@@ -16,8 +15,6 @@ const (
EnvSeparator = "_"
)
-var errProvideViperInOpts = errors.New("provide viper in opts")
-
func CreateViper(opts ...Option) (*viper.Viper, error) {
o := defaultOpts()
for i := range opts {
@@ -62,7 +59,7 @@ func ReloadViper(opts ...Option) error {
}
if o.v == nil {
- return errProvideViperInOpts
+ return fmt.Errorf("provide viper in opts")
}
if o.path != "" {
diff --git a/cmd/internal/common/config/viper_test.go b/cmd/internal/common/config/viper_test.go
deleted file mode 100644
index d533a15c2..000000000
--- a/cmd/internal/common/config/viper_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package config_test
-
-import (
- "encoding/json"
- "os"
- "path"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
- configtest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config/test"
- "github.com/spf13/viper"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "gopkg.in/yaml.v3"
-)
-
-func TestCreateReloadViper(t *testing.T) {
- type m = map[string]any
-
- dummyFileSize := 1 << 10
-
- configPath := t.TempDir()
- configFile := "000_a.yaml"
-
- configDirPath := path.Join(configPath, "conf.d")
- require.NoError(t, os.Mkdir(configDirPath, 0o700))
-
- configtest.PrepareConfigFiles(t, configPath, []configtest.ConfigFile{
- configtest.NewConfigFile(configFile, m{"a": "000"}, yaml.Marshal),
- })
-
- // Not valid configs, dummy files those appear lexicographically first.
- configtest.PrepareDummyFiles(t, configDirPath, []configtest.DummyFile{
- configtest.NewDummyFile("000_file_1", dummyFileSize),
- configtest.NewDummyFile("000_file_2", dummyFileSize),
- configtest.NewDummyFile("000_file_3", dummyFileSize),
- })
-
- configtest.PrepareConfigFiles(t, configDirPath, []configtest.ConfigFile{
- // Valid configs with invalid extensions those appear lexicographically first.
- configtest.NewConfigFile("001_a.yaml.un~", m{"a": "101"}, yaml.Marshal),
- configtest.NewConfigFile("001_b.yml~", m{"b": m{"a": "102", "b": "103"}}, yaml.Marshal),
- configtest.NewConfigFile("001_c.yaml.swp", m{"c": m{"a": "104", "b": "105"}}, yaml.Marshal),
- configtest.NewConfigFile("001_d.json.swp", m{"d": m{"a": "106", "b": "107"}}, json.Marshal),
-
- // Valid configs with valid extensions those should be loaded.
- configtest.NewConfigFile("010_a.yaml", m{"a": "1"}, yaml.Marshal),
- configtest.NewConfigFile("020_b.yml", m{"b": m{"a": "2", "b": "3"}}, yaml.Marshal),
- configtest.NewConfigFile("030_c.json", m{"c": m{"a": "4", "b": "5"}}, json.Marshal),
-
- // Valid configs with invalid extensions those appear lexicographically last.
- configtest.NewConfigFile("099_a.yaml.un~", m{"a": "201"}, yaml.Marshal),
- configtest.NewConfigFile("099_b.yml~", m{"b": m{"a": "202", "b": "203"}}, yaml.Marshal),
- configtest.NewConfigFile("099_c.yaml.swp", m{"c": m{"a": "204", "b": "205"}}, yaml.Marshal),
- configtest.NewConfigFile("099_c.json.swp", m{"d": m{"a": "206", "b": "207"}}, json.Marshal),
- })
-
- // Not valid configs, dummy files those appear lexicographically last.
- configtest.PrepareDummyFiles(t, configDirPath, []configtest.DummyFile{
- configtest.NewDummyFile("999_file_1", dummyFileSize),
- configtest.NewDummyFile("999_file_2", dummyFileSize),
- configtest.NewDummyFile("999_file_3", dummyFileSize),
- })
-
- finalConfig := m{"a": "1", "b": m{"a": "2", "b": "3"}, "c": m{"a": "4", "b": "5"}}
-
- var (
- v *viper.Viper
- err error
- )
-
- t.Run("create config with config dir only", func(t *testing.T) {
- v, err = config.CreateViper(
- config.WithConfigDir(configDirPath),
- )
- require.NoError(t, err)
- assert.Equal(t, finalConfig, v.AllSettings())
- })
-
- t.Run("reload config with config dir only", func(t *testing.T) {
- err = config.ReloadViper(
- config.WithViper(v),
- config.WithConfigDir(configDirPath),
- )
- require.NoError(t, err)
- assert.Equal(t, finalConfig, v.AllSettings())
- })
-
- t.Run("create config with both config and config dir", func(t *testing.T) {
- v, err = config.CreateViper(
- config.WithConfigFile(path.Join(configPath, configFile)),
- config.WithConfigDir(configDirPath),
- )
- require.NoError(t, err)
- assert.Equal(t, finalConfig, v.AllSettings())
- })
-
- t.Run("reload config with both config and config dir", func(t *testing.T) {
- err = config.ReloadViper(
- config.WithViper(v),
- config.WithConfigFile(path.Join(configPath, configFile)),
- config.WithConfigDir(configDirPath),
- )
- require.NoError(t, err)
- assert.Equal(t, finalConfig, v.AllSettings())
- })
-}
diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go
index 13f447af4..9e4fa3098 100644
--- a/cmd/internal/common/exit.go
+++ b/cmd/internal/common/exit.go
@@ -26,15 +26,13 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) {
_ = iota
internal
aclDenied
- apemanagerDenied
)
var (
code int
- internalErr = new(sdkstatus.ServerInternal)
- accessErr = new(sdkstatus.ObjectAccessDenied)
- apemanagerErr = new(sdkstatus.APEManagerAccessDenied)
+ internalErr = new(sdkstatus.ServerInternal)
+ accessErr = new(sdkstatus.ObjectAccessDenied)
)
switch {
@@ -43,21 +41,13 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) {
case errors.As(err, &accessErr):
code = aclDenied
err = fmt.Errorf("%w: %s", err, accessErr.Reason())
- case errors.As(err, &apemanagerErr):
- code = apemanagerDenied
- err = fmt.Errorf("%w: %s", err, apemanagerErr.Reason())
default:
code = internal
}
cmd.PrintErrln(err)
- for p := cmd; p != nil; p = p.Parent() {
- if p.PersistentPostRun != nil {
- p.PersistentPostRun(cmd, nil)
- if !cobra.EnableTraverseRunHooks {
- break
- }
- }
+ if cmd.PersistentPostRun != nil {
+ cmd.PersistentPostRun(cmd, nil)
}
os.Exit(code)
}
diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go
index 5dd1a060e..94964e0d2 100644
--- a/cmd/internal/common/netmap.go
+++ b/cmd/internal/common/netmap.go
@@ -10,32 +10,31 @@ import (
// PrettyPrintNodeInfo print information about network node with given indent and index.
// To avoid printing attribute list use short parameter.
func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo,
- index int, indent string, short bool,
-) {
+ index int, indent string, short bool) {
var strState string
- switch node.Status() {
+ switch {
default:
strState = "STATE_UNSUPPORTED"
- case netmap.Online:
+ case node.IsOnline():
strState = "ONLINE"
- case netmap.Offline:
+ case node.IsOffline():
strState = "OFFLINE"
- case netmap.Maintenance:
+ case node.IsMaintenance():
strState = "MAINTENANCE"
}
cmd.Printf("%sNode %d: %s %s ", indent, index+1, hex.EncodeToString(node.PublicKey()), strState)
- for endpoint := range node.NetworkEndpoints() {
+ netmap.IterateNetworkEndpoints(node, func(endpoint string) {
cmd.Printf("%s ", endpoint)
- }
+ })
cmd.Println()
if !short {
- for key, value := range node.Attributes() {
+ node.IterateAttributes(func(key, value string) {
cmd.Printf("%s\t%s: %s\n", indent, key, value)
- }
+ })
}
}
diff --git a/config/example/ir.env b/config/example/ir.env
index c13044a6e..3f9530ab6 100644
--- a/config/example/ir.env
+++ b/config/example/ir.env
@@ -1,7 +1,4 @@
FROSTFS_IR_LOGGER_LEVEL=info
-FROSTFS_IR_LOGGER_TIMESTAMP=true
-FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph"
-FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug"
FROSTFS_IR_WALLET_PATH=/path/to/wallet.json
FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX
@@ -82,12 +79,3 @@ FROSTFS_IR_PPROF_MUTEX_RATE=10000
FROSTFS_IR_PROMETHEUS_ENABLED=true
FROSTFS_IR_PROMETHEUS_ADDRESS=localhost:9090
FROSTFS_IR_PROMETHEUS_SHUTDOWN_TIMEOUT=30s
-
-FROSTFS_MULTINET_ENABLED=true
-FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
-FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
-FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
-FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
-FROSTFS_MULTINET_BALANCER=roundrobin
-FROSTFS_MULTINET_RESTRICT=false
-FROSTFS_MULTINET_FALLBACK_DELAY=350ms
diff --git a/config/example/ir.yaml b/config/example/ir.yaml
index ed53f014b..a01f3d0bb 100644
--- a/config/example/ir.yaml
+++ b/config/example/ir.yaml
@@ -2,10 +2,6 @@
logger:
level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
- timestamp: true
- tags:
- - names: "main, morph" # Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`.
- level: debug
wallet:
path: /path/to/wallet.json # Path to NEP-6 NEO wallet file
@@ -123,21 +119,3 @@ prometheus:
enabled: true
address: localhost:9090 # Endpoint for application prometheus metrics; disabled by default
shutdown_timeout: 30s # Timeout for metrics HTTP server graceful shutdown
-
-systemdnotify:
- enabled: true
-
-multinet:
- enabled: true
- subnets:
- - mask: 192.168.219.174/24
- source_ips:
- - 192.168.218.185
- - 192.168.219.185
- - mask: 10.78.70.74/24
- source_ips:
- - 10.78.70.185
- - 10.78.71.185
- balancer: roundrobin
- restrict: false
- fallback_delay: 350ms
diff --git a/config/example/node.env b/config/example/node.env
index 9a2426358..fde65173b 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -1,8 +1,4 @@
FROSTFS_LOGGER_LEVEL=debug
-FROSTFS_LOGGER_DESTINATION=journald
-FROSTFS_LOGGER_TIMESTAMP=true
-FROSTFS_LOGGER_TAGS_0_NAMES="main, morph"
-FROSTFS_LOGGER_TAGS_0_LEVEL="debug"
FROSTFS_PPROF_ENABLED=true
FROSTFS_PPROF_ADDRESS=localhost:6060
@@ -22,9 +18,16 @@ FROSTFS_NODE_WALLET_PASSWORD=password
FROSTFS_NODE_ADDRESSES="s01.frostfs.devenv:8080 /dns4/s02.frostfs.devenv/tcp/8081 grpc://127.0.0.1:8082 grpcs://localhost:8083"
FROSTFS_NODE_ATTRIBUTE_0=Price:11
FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK"
+FROSTFS_NODE_RELAY=true
FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions
FROSTFS_NODE_PERSISTENT_STATE_PATH=/state
-FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db
+FROSTFS_NODE_NOTIFICATION_ENABLED=true
+FROSTFS_NODE_NOTIFICATION_ENDPOINT=tls://localhost:4222
+FROSTFS_NODE_NOTIFICATION_TIMEOUT=6s
+FROSTFS_NODE_NOTIFICATION_DEFAULT_TOPIC=topic
+FROSTFS_NODE_NOTIFICATION_CERTIFICATE=/cert/path
+FROSTFS_NODE_NOTIFICATION_KEY=/key/path
+FROSTFS_NODE_NOTIFICATION_CA=/ca/path
# Tree service section
FROSTFS_TREE_ENABLED=true
@@ -33,7 +36,6 @@ FROSTFS_TREE_REPLICATION_CHANNEL_CAPACITY=32
FROSTFS_TREE_REPLICATION_WORKER_COUNT=32
FROSTFS_TREE_REPLICATION_TIMEOUT=5s
FROSTFS_TREE_SYNC_INTERVAL=1h
-FROSTFS_TREE_SYNC_BATCH_SIZE=2000
FROSTFS_TREE_AUTHORIZED_KEYS="0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56"
# gRPC section
@@ -65,12 +67,8 @@ FROSTFS_MORPH_CACHE_TTL=15s
FROSTFS_MORPH_SWITCH_INTERVAL=3m
FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS="wss://rpc1.morph.frostfs.info:40341/ws"
FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY=0
-FROSTFS_MORPH_RPC_ENDPOINT_0_TRUSTED_CA_LIST="/path/to/ca.pem"
-FROSTFS_MORPH_RPC_ENDPOINT_0_CERTIFICATE="/path/to/cert"
-FROSTFS_MORPH_RPC_ENDPOINT_0_KEY="/path/to/key"
FROSTFS_MORPH_RPC_ENDPOINT_1_ADDRESS="wss://rpc2.morph.frostfs.info:40341/ws"
FROSTFS_MORPH_RPC_ENDPOINT_1_PRIORITY=2
-FROSTFS_MORPH_APE_CHAIN_CACHE_SIZE=100000
# API Client section
FROSTFS_APICLIENT_DIAL_TIMEOUT=15s
@@ -85,25 +83,18 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s
FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
FROSTFS_REPLICATOR_POOL_SIZE=10
-# Container service section
-FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
-
# Object service section
+FROSTFS_OBJECT_PUT_POOL_SIZE_REMOTE=100
+FROSTFS_OBJECT_PUT_POOL_SIZE_LOCAL=200
FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
-FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE"
-
-FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
-FROSTFS_RPC_LIMITS_0_MAX_OPS=1000
-FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
-FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
# Storage engine section
+FROSTFS_STORAGE_SHARD_POOL_SIZE=15
FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
## 0 shard
### Flag to refill Metabase from BlobStor
FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE=false
-FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE_WORKER_COUNT=100
### Flag to set shard mode
FROSTFS_STORAGE_SHARD_0_MODE=read-only
### Write cache config
@@ -112,22 +103,16 @@ FROSTFS_STORAGE_SHARD_0_WRITECACHE_NO_SYNC=true
FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH=tmp/0/cache
FROSTFS_STORAGE_SHARD_0_WRITECACHE_SMALL_OBJECT_SIZE=16384
FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_SIZE=134217728
-FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT=30
+FROSTFS_STORAGE_SHARD_0_WRITECACHE_WORKERS_NUMBER=30
FROSTFS_STORAGE_SHARD_0_WRITECACHE_CAPACITY=3221225472
-FROSTFS_STORAGE_SHARD_0_WRITECACHE_PAGE_SIZE=4096
-FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_COUNT=49
-FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_FLUSHING_OBJECTS_SIZE=100
### Metabase config
FROSTFS_STORAGE_SHARD_0_METABASE_PATH=tmp/0/meta
FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644
FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100
FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms
### Blobstor config
-FROSTFS_STORAGE_SHARD_0_COMPRESSION_ENABLED=true
-FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest
+FROSTFS_STORAGE_SHARD_0_COMPRESS=true
FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*"
-FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true
-FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY_THRESHOLD=0.7
FROSTFS_STORAGE_SHARD_0_SMALL_OBJECT_SIZE=102400
### Blobovnicza config
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH=tmp/0/blob/blobovnicza
@@ -137,10 +122,7 @@ FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_SIZE=4194304
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH=1
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH=4
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
-FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_TTL=1m
-FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_EXP_INTERVAL=30s
-FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_INIT_WORKER_COUNT=10
-FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_REBUILD_DROP_TIMEOUT=30s
+FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_LEAF_WIDTH=10
### FSTree config
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE=fstree
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH=tmp/0/blob
@@ -158,55 +140,7 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
#### Limit of objects to be marked expired by the garbage collector
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
#### Limit of concurrent workers collecting expired objects by the garbage collector
-FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
-#### Limits config
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100
+FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKERS_COUNT=15
## 1 shard
### Flag to refill Metabase from BlobStor
@@ -218,7 +152,7 @@ FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED=true
FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH=tmp/1/cache
FROSTFS_STORAGE_SHARD_1_WRITECACHE_SMALL_OBJECT_SIZE=16384
FROSTFS_STORAGE_SHARD_1_WRITECACHE_MAX_OBJECT_SIZE=134217728
-FROSTFS_STORAGE_SHARD_1_WRITECACHE_FLUSH_WORKER_COUNT=30
+FROSTFS_STORAGE_SHARD_1_WRITECACHE_WORKERS_NUMBER=30
FROSTFS_STORAGE_SHARD_1_WRITECACHE_CAPACITY=4294967296
### Metabase config
FROSTFS_STORAGE_SHARD_1_METABASE_PATH=tmp/1/meta
@@ -235,8 +169,7 @@ FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_SIZE=4194304
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH=1
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH=4
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
-FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_TTL=5m
-FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_EXP_INTERVAL=15s
+FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_LEAF_WIDTH=10
### FSTree config
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE=fstree
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH=tmp/1/blob
@@ -258,26 +191,5 @@ FROSTFS_STORAGE_SHARD_1_GC_REMOVER_SLEEP_INTERVAL=5m
FROSTFS_TRACING_ENABLED=true
FROSTFS_TRACING_ENDPOINT="localhost"
FROSTFS_TRACING_EXPORTER="otlp_grpc"
-FROSTFS_TRACING_TRUSTED_CA=""
-FROSTFS_TRACING_ATTRIBUTES_0_KEY=key0
-FROSTFS_TRACING_ATTRIBUTES_0_VALUE=value
-FROSTFS_TRACING_ATTRIBUTES_1_KEY=key1
-FROSTFS_TRACING_ATTRIBUTES_1_VALUE=value
FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
-
-# AUDIT section
-FROSTFS_AUDIT_ENABLED=true
-
-# MULTINET section
-FROSTFS_MULTINET_ENABLED=true
-FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
-FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
-FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
-FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
-FROSTFS_MULTINET_BALANCER=roundrobin
-FROSTFS_MULTINET_RESTRICT=false
-FROSTFS_MULTINET_FALLBACK_DELAY=350ms
-
-FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
-FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"
diff --git a/config/example/node.json b/config/example/node.json
index 6b7a9c2c6..e8455ee55 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -1,14 +1,6 @@
{
"logger": {
- "level": "debug",
- "destination": "journald",
- "timestamp": true,
- "tags": [
- {
- "names": "main, morph",
- "level": "debug"
- }
- ]
+ "level": "debug"
},
"pprof": {
"enabled": true,
@@ -37,13 +29,22 @@
],
"attribute_0": "Price:11",
"attribute_1": "UN-LOCODE:RU MSK",
+ "relay": true,
"persistent_sessions": {
"path": "/sessions"
},
"persistent_state": {
"path": "/state"
},
- "locode_db_path": "/path/to/locode/db"
+ "notification": {
+ "enabled": true,
+ "endpoint": "tls://localhost:4222",
+ "timeout": "6s",
+ "default_topic": "topic",
+ "certificate": "/cert/path",
+ "key": "/key/path",
+ "ca": "/ca/path"
+ }
},
"grpc": {
"0": {
@@ -75,7 +76,6 @@
"replication_worker_count": 32,
"replication_timeout": "5s",
"sync_interval": "1h",
- "sync_batch_size": 2000,
"authorized_keys": [
"0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0",
"02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56"
@@ -103,19 +103,13 @@
"rpc_endpoint": [
{
"address": "wss://rpc1.morph.frostfs.info:40341/ws",
- "priority": 0,
- "trusted_ca_list": [
- "/path/to/ca.pem"
- ],
- "certificate": "/path/to/cert",
- "key": "/path/to/key"
+ "priority": 0
},
{
"address": "wss://rpc2.morph.frostfs.info:40341/ws",
"priority": 2
}
- ],
- "ape_chain_cache_size": 100000
+ ]
},
"apiclient": {
"dial_timeout": "15s",
@@ -130,57 +124,31 @@
"pool_size": 10,
"put_timeout": "15s"
},
- "container": {
- "list_stream": {
- "batch_size": "500"
- }
- },
"object": {
"delete": {
"tombstone_lifetime": 10
},
"put": {
+ "pool_size_remote": 100,
+ "pool_size_local": 200,
"skip_session_token_issuer_verification": true
- },
- "get": {
- "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"]
}
},
- "rpc": {
- "limits": [
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/PutSingle",
- "/neo.fs.v2.object.ObjectService/Put"
- ],
- "max_ops": 1000
- },
- {
- "methods": [
- "/neo.fs.v2.object.ObjectService/Get"
- ],
- "max_ops": 10000
- }
- ]
- },
"storage": {
+ "shard_pool_size": 15,
"shard_ro_error_threshold": 100,
"shard": {
"0": {
"mode": "read-only",
"resync_metabase": false,
- "resync_metabase_worker_count": 100,
"writecache": {
"enabled": false,
"no_sync": true,
"path": "tmp/0/cache",
"small_object_size": 16384,
"max_object_size": 134217728,
- "flush_worker_count": 30,
- "capacity": 3221225472,
- "page_size": 4096,
- "max_object_count": 49,
- "max_flushing_objects_size": 100
+ "workers_number": 30,
+ "capacity": 3221225472
},
"metabase": {
"path": "tmp/0/meta",
@@ -188,15 +156,10 @@
"max_batch_size": 100,
"max_batch_delay": "10ms"
},
- "compression": {
- "enabled": true,
- "level": "fastest",
- "exclude_content_types": [
- "audio/*", "video/*"
- ],
- "estimate_compressibility": true,
- "estimate_compressibility_threshold": 0.7
- },
+ "compress": true,
+ "compression_exclude_content_types": [
+ "audio/*", "video/*"
+ ],
"small_object_size": 102400,
"blobstor": [
{
@@ -207,10 +170,7 @@
"depth": 1,
"width": 4,
"opened_cache_capacity": 50,
- "opened_cache_ttl": "1m",
- "opened_cache_exp_interval": "30s",
- "init_worker_count": 10,
- "rebuild_drop_timeout": "30s"
+ "leaf_width": 10
},
{
"type": "fstree",
@@ -228,88 +188,7 @@
"remover_batch_size": 150,
"remover_sleep_interval": "2m",
"expired_collector_batch_size": 1500,
- "expired_collector_worker_count": 15
- },
- "limits": {
- "read": {
- "max_running_ops": 10000,
- "max_waiting_ops": 1000,
- "idle_timeout": "30s",
- "tags": [
- {
- "tag": "internal",
- "weight": 20,
- "limit_ops": 0,
- "reserved_ops": 1000
- },
- {
- "tag": "client",
- "weight": 70,
- "reserved_ops": 10000
- },
- {
- "tag": "background",
- "weight": 5,
- "limit_ops": 10000,
- "reserved_ops": 0
- },
- {
- "tag": "writecache",
- "weight": 5,
- "limit_ops": 25000
- },
- {
- "tag": "policer",
- "weight": 5,
- "limit_ops": 25000,
- "prohibited": true
- },
- {
- "tag": "treesync",
- "weight": 5,
- "limit_ops": 25
- }
- ]
- },
- "write": {
- "max_running_ops": 1000,
- "max_waiting_ops": 100,
- "idle_timeout": "45s",
- "tags": [
- {
- "tag": "internal",
- "weight": 200,
- "limit_ops": 0,
- "reserved_ops": 100
- },
- {
- "tag": "client",
- "weight": 700,
- "reserved_ops": 1000
- },
- {
- "tag": "background",
- "weight": 50,
- "limit_ops": 1000,
- "reserved_ops": 0
- },
- {
- "tag": "writecache",
- "weight": 50,
- "limit_ops": 2500
- },
- {
- "tag": "policer",
- "weight": 50,
- "limit_ops": 2500
- },
- {
- "tag": "treesync",
- "weight": 50,
- "limit_ops": 100
- }
- ]
- }
+ "expired_collector_workers_count": 15
}
},
"1": {
@@ -317,11 +196,12 @@
"resync_metabase": true,
"writecache": {
"enabled": true,
+ "type": "bbolt",
"path": "tmp/1/cache",
"memcache_capacity": 2147483648,
"small_object_size": 16384,
"max_object_size": 134217728,
- "flush_worker_count": 30,
+ "workers_number": 30,
"capacity": 4294967296
},
"metabase": {
@@ -330,9 +210,7 @@
"max_batch_size": 200,
"max_batch_delay": "20ms"
},
- "compression": {
- "enabled": false
- },
+ "compress": false,
"small_object_size": 102400,
"blobstor": [
{
@@ -343,8 +221,7 @@
"depth": 1,
"width": 4,
"opened_cache_capacity": 50,
- "opened_cache_ttl": "5m",
- "opened_cache_exp_interval": "15s"
+ "leaf_width": 10
},
{
"type": "fstree",
@@ -370,60 +247,10 @@
},
"tracing": {
"enabled": true,
- "endpoint": "localhost",
- "exporter": "otlp_grpc",
- "trusted_ca": "",
- "attributes":[
- {
- "key": "key0",
- "value": "value"
- },
- {
- "key": "key1",
- "value": "value"
- }
- ]
+ "endpoint": "localhost:9090",
+ "exporter": "otlp_grpc"
},
"runtime": {
"soft_memory_limit": 1073741824
- },
- "audit": {
- "enabled": true
- },
- "multinet": {
- "enabled": true,
- "subnets": [
- {
- "mask": "192.168.219.174/24",
- "source_ips": [
- "192.168.218.185",
- "192.168.219.185"
- ]
- },
- {
- "mask": "10.78.70.74/24",
- "source_ips":[
- "10.78.70.185",
- "10.78.71.185"
- ]
- }
- ],
- "balancer": "roundrobin",
- "restrict": false,
- "fallback_delay": "350ms"
- },
- "qos": {
- "critical": {
- "authorized_keys": [
- "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11",
- "028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
- ]
- },
- "internal": {
- "authorized_keys": [
- "02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2",
- "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"
- ]
- }
}
}
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 2d4bc90fb..2ca1b426c 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -1,13 +1,5 @@
logger:
level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
- destination: journald # logger destination: one of "stdout" (default), "journald"
- timestamp: true
- tags:
- - names: "main, morph"
- level: debug
-
-systemdnotify:
- enabled: true
pprof:
enabled: true
@@ -34,11 +26,19 @@ node:
- grpcs://localhost:8083
attribute_0: "Price:11"
attribute_1: UN-LOCODE:RU MSK
+ relay: true # start Storage node in relay mode without bootstrapping into the Network map
persistent_sessions:
path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions)
persistent_state:
path: /state # path to persistent state file of Storage node
- "locode_db_path": "/path/to/locode/db"
+ notification:
+ enabled: true # turn on object notification service
+ endpoint: "tls://localhost:4222" # notification server endpoint
+ timeout: "6s" # timeout for object notification client connection
+ default_topic: "topic" # default topic for object notifications if not found in object's meta
+ certificate: "/cert/path" # path to TLS certificate
+ key: "/key/path" # path to TLS key
+ ca: "/ca/path" # path to optional CA certificate
grpc:
- endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server
@@ -62,7 +62,6 @@ tree:
replication_channel_capacity: 32
replication_timeout: 5s
sync_interval: 1h
- sync_batch_size: 2000
authorized_keys: # list of hex-encoded public keys that have rights to use the Tree Service with frostfs-cli
- 0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0
- 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56
@@ -82,25 +81,15 @@ contracts: # side chain NEOFS contract script hashes; optional, override values
morph:
dial_timeout: 30s # timeout for side chain NEO RPC client connection
- cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls).
- # Negative value disables caching. A zero value sets the default value.
+ cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching.
# Default value: block time. It is recommended to have this value less or equal to block time.
# Cached entities: containers, container lists, eACL tables.
- container_cache_size: 100 # container_cache_size is is the maximum number of containers in the cache.
switch_interval: 3m # interval b/w RPC switch attempts if the node is connected not to the highest priority node
rpc_endpoint: # side chain NEO RPC endpoints; are shuffled and used one by one until the first success
- address: wss://rpc1.morph.frostfs.info:40341/ws
priority: 0
- trusted_ca_list:
- - "/path/to/ca.pem"
- certificate: "/path/to/cert"
- key: "/path/to/key"
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
- ape_chain_cache_size: 100000
- netmap:
- candidates:
- poll_interval: 20s
apiclient:
dial_timeout: 15s # timeout for FrostFS API client connection
@@ -115,31 +104,17 @@ replicator:
put_timeout: 15s # timeout for the Replicator PUT remote operation
pool_size: 10 # maximum amount of concurrent replications
-container:
- list_stream:
- batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once
-
object:
delete:
tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
put:
+ pool_size_remote: 100 # number of async workers for remote PUT operations
+ pool_size_local: 200 # number of async workers for local PUT operations
skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
- get:
- priority: # list of metrics of nodes for prioritization
- - $attribute:ClusterName
- - $attribute:UN-LOCODE
-
-rpc:
- limits:
- - methods:
- - /neo.fs.v2.object.ObjectService/PutSingle
- - /neo.fs.v2.object.ObjectService/Put
- max_ops: 1000
- - methods:
- - /neo.fs.v2.object.ObjectService/Get
- max_ops: 10000
storage:
+ # note: shard configuration can be omitted for relay node (see `node.relay`)
+ shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)
shard:
@@ -148,12 +123,13 @@ storage:
writecache:
enabled: true
+ type: bbolt
small_object_size: 16k # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes
max_object_size: 134217728 # size threshold for "big" objects which bypass write-cache and go to the storage directly, bytes
- flush_worker_count: 30 # number of write-cache flusher threads
+ workers_number: 30 # number of write-cache flusher threads
metabase:
- perm: 0o644 # permissions for metabase files(directories: +x for current user and group)
+ perm: 0644 # permissions for metabase files(directories: +x for current user and group)
max_batch_size: 200
max_batch_delay: 20ms
@@ -161,19 +137,17 @@ storage:
max_batch_delay: 5ms # maximum delay for a batch of operations to be executed
max_batch_size: 100 # maximum amount of operations in a single batch
- compression:
- enabled: false # turn on/off zstd compression of stored objects
+ compress: false # turn on/off zstd(level 3) compression of stored objects
small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes
blobstor:
- size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
- perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
+ perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
- opened_cache_ttl: 5m # ttl for opened database file
- opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
- - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
+ leaf_width: 10 # max count of key-value DB on leafs of object tree storage
+ - perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 5 # max depth of object tree storage in FS
gc:
@@ -188,38 +162,26 @@ storage:
# degraded-read-only
# disabled (do not work with the shard, allows to not remove it from the config)
resync_metabase: false # sync metabase with blobstor on start, expensive, leave false until complete understanding
- resync_metabase_worker_count: 100
writecache:
enabled: false
no_sync: true
path: tmp/0/cache # write-cache root directory
capacity: 3221225472 # approximate write-cache total size, bytes
- max_object_count: 49
- page_size: 4k
- max_flushing_objects_size: 100b
metabase:
path: tmp/0/meta # metabase path
max_batch_size: 100
max_batch_delay: 10ms
- compression:
- enabled: true # turn on/off zstd compression of stored objects
- level: fastest
- exclude_content_types:
- - audio/*
- - video/*
- estimate_compressibility: true
- estimate_compressibility_threshold: 0.7
+ compress: true # turn on/off zstd(level 3) compression of stored objects
+ compression_exclude_content_types:
+ - audio/*
+ - video/*
blobstor:
- type: blobovnicza
path: tmp/0/blob/blobovnicza
- init_worker_count: 10 #count of workers to initialize blobovniczas
- rebuild_drop_timeout: 30s # timeout before drop single blobovnicza
- opened_cache_ttl: 1m
- opened_cache_exp_interval: 30s
- type: fstree
path: tmp/0/blob # blobstor path
@@ -232,60 +194,7 @@ storage:
remover_batch_size: 150 # number of objects to be removed by the garbage collector
remover_sleep_interval: 2m # frequency of the garbage collector invocation
expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
- expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
-
- limits:
- read:
- max_running_ops: 10000
- max_waiting_ops: 1000
- idle_timeout: 30s
- tags:
- - tag: internal
- weight: 20
- limit_ops: 0
- reserved_ops: 1000
- - tag: client
- weight: 70
- reserved_ops: 10000
- - tag: background
- weight: 5
- limit_ops: 10000
- reserved_ops: 0
- - tag: writecache
- weight: 5
- limit_ops: 25000
- - tag: policer
- weight: 5
- limit_ops: 25000
- prohibited: true
- - tag: treesync
- weight: 5
- limit_ops: 25
- write:
- max_running_ops: 1000
- max_waiting_ops: 100
- idle_timeout: 45s
- tags:
- - tag: internal
- weight: 200
- limit_ops: 0
- reserved_ops: 100
- - tag: client
- weight: 700
- reserved_ops: 1000
- - tag: background
- weight: 50
- limit_ops: 1000
- reserved_ops: 0
- - tag: writecache
- weight: 50
- limit_ops: 2500
- - tag: policer
- weight: 50
- limit_ops: 2500
- - tag: treesync
- weight: 50
- limit_ops: 100
+ expired_collector_workers_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
1:
writecache:
@@ -305,46 +214,12 @@ storage:
pilorama:
path: tmp/1/blob/pilorama.db
no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted.
- perm: 0o644 # permission to use for the database file and intermediate directories
+ perm: 0644 # permission to use for the database file and intermediate directories
tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost"
- trusted_ca: ""
- attributes:
- - key: key0
- value: value
- - key: key1
- value: value
runtime:
soft_memory_limit: 1gb
-
-audit:
- enabled: true
-
-multinet:
- enabled: true
- subnets:
- - mask: 192.168.219.174/24
- source_ips:
- - 192.168.218.185
- - 192.168.219.185
- - mask: 10.78.70.74/24
- source_ips:
- - 10.78.70.185
- - 10.78.71.185
- balancer: roundrobin
- restrict: false
- fallback_delay: 350ms
-
-qos:
- critical:
- authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag
- - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
- internal:
- authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag
- - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2
- - 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a
diff --git a/config/mainnet/README.md b/config/mainnet/README.md
new file mode 100644
index 000000000..717a9b0ff
--- /dev/null
+++ b/config/mainnet/README.md
@@ -0,0 +1,28 @@
+# N3 Mainnet Storage node configuration
+
+Here is a template for simple storage node configuration in N3 Mainnet.
+Make sure to specify correct values instead of `<...>` placeholders.
+Do not change the `contracts` section. Run the latest frostfs-node release with
+the fixed config `frostfs-node -c config.yml`
+
+To use NeoFS in the Mainnet, you need to deposit assets to NeoFS contract.
+The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
+(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`)
+
+## Tips
+
+Use `grpcs://` scheme in the announced address if you enable TLS in grpc server.
+```yaml
+node:
+ addresses:
+ - grpcs://frostfs.my.org:8080
+
+grpc:
+ num: 1
+ 0:
+ endpoint: frostfs.my.org:8080
+ tls:
+ enabled: true
+ certificate: /path/to/cert
+ key: /path/to/key
+```
diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml
new file mode 100644
index 000000000..7db476e55
--- /dev/null
+++ b/config/mainnet/config.yml
@@ -0,0 +1,70 @@
+node:
+ wallet:
+ path:
+ address:
+ password:
+ addresses:
+ -
+ attribute_0: UN-LOCODE:
+ attribute_1: Price:100000
+ attribute_2: User-Agent:FrostFS\/0.9999
+
+grpc:
+ num: 1
+ 0:
+ endpoint:
+ tls:
+ enabled: false
+
+storage:
+ shard_num: 1
+ shard:
+ 0:
+ metabase:
+ path: /storage/path/metabase
+ perm: 0600
+ blobstor:
+ - path: /storage/path/blobovnicza
+ type: blobovnicza
+ perm: 0600
+ opened_cache_capacity: 32
+ depth: 1
+ width: 1
+ - path: /storage/path/fstree
+ type: fstree
+ perm: 0600
+ depth: 4
+ writecache:
+ enabled: false
+ gc:
+ remover_batch_size: 100
+ remover_sleep_interval: 1m
+
+logger:
+ level: info
+
+prometheus:
+ enabled: true
+ address: localhost:9090
+ shutdown_timeout: 15s
+
+object:
+ put:
+ pool_size_remote: 100
+ pool_size_local: 100
+
+morph:
+ rpc_endpoint:
+ - wss://rpc1.morph.frostfs.info:40341/ws
+ - wss://rpc2.morph.frostfs.info:40341/ws
+ - wss://rpc3.morph.frostfs.info:40341/ws
+ - wss://rpc4.morph.frostfs.info:40341/ws
+ - wss://rpc5.morph.frostfs.info:40341/ws
+ - wss://rpc6.morph.frostfs.info:40341/ws
+ - wss://rpc7.morph.frostfs.info:40341/ws
+ dial_timeout: 20s
+
+contracts:
+ balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
+ container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
+ netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1
diff --git a/config/testnet/README.md b/config/testnet/README.md
new file mode 100644
index 000000000..b5faf2b27
--- /dev/null
+++ b/config/testnet/README.md
@@ -0,0 +1,129 @@
+# N3 Testnet Storage node configuration
+
+There is a prepared configuration for NeoFS Storage Node deployment in
+N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
+docker image and run it with docker-compose.
+
+## Build image
+
+Prepared **frostfs-storage-testnet** image is available at Docker Hub.
+However, if you need to rebuild it for some reason, run
+`make image-storage-testnet` command.
+
+```
+$ make image-storage-testnet
+...
+Successfully built ab0557117b02
+Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
+```
+
+## Deploy node
+
+To run a storage node in N3 Testnet environment, you should deposit GAS assets,
+update docker-compose file and start the node.
+
+### Deposit
+
+The Storage Node owner should deposit GAS to NeoFS smart contract. It generates a
+bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send bootstrap tx.
+
+First, obtain GAS in N3 Testnet chain. You can do that with
+[faucet](https://neowish.ngd.network) service.
+
+Then, make a deposit by transferring GAS to NeoFS contract in N3 Testnet.
+You can provide scripthash in the `data` argument of transfer tx to make a
+deposit to a specified account. Otherwise, deposit is made to the tx sender.
+
+NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
+so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.
+
+See a deposit example with `neo-go`.
+
+```
+neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
+--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
+--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
+--token GAS \
+--amount 1
+```
+
+### Configure
+
+Next, configure `node_config.env` file. Change endpoints values. Both
+should contain your **public** IP.
+
+```
+NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
+NEOFS_NODE_ADDRESSES=65.52.183.157:36512
+```
+
+Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
+attribute.
+
+```
+NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
+NEOFS_NODE_ADDRESSES=65.52.183.157:36512
+NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
+```
+
+You can validate UN/LOCODE attribute in
+[NeoFS LOCODE database](https://github.com/TrueCloudLab/frostfs-locode-db/releases/tag/v0.1.0)
+with frostfs-cli.
+
+```
+$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
+Country: Russia
+Location: Saint Petersburg (ex Leningrad)
+Continent: Europe
+Subdivision: [SPE] Sankt-Peterburg
+Coordinates: 59.53, 30.15
+```
+
+It is recommended to pass the node's key as a file. To do so, convert your wallet
+WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file.
+
+```
+// Print WIF in a 32-byte hex format
+$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
+PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
+PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
+WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
+Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
+ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc
+ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf
+
+// Save 32-byte hex into a file
+$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
+```
+
+Then, specify the path to this file in `docker-compose.yml`
+```yaml
+ volumes:
+ - frostfs_storage:/storage
+ - ./my_wallet.key:/node.key
+```
+
+
+NeoFS objects will be stored on your machine. By default, docker-compose
+is configured to store objects in named docker volume `frostfs_storage`. You can
+specify a directory on the filesystem to store objects there.
+
+```yaml
+ volumes:
+ - /home/username/frostfs/rc3/storage:/storage
+ - ./my_wallet.key:/node.key
+```
+
+### Start
+
+Run the node with `docker-compose up` command and stop it with `docker-compose down`.
+
+### Debug
+
+To print node logs, use `docker logs frostfs-testnet`. To print debug messages in
+log, set up log level to debug with this env:
+
+```yaml
+ environment:
+ - NEOFS_LOGGER_LEVEL=debug
+```
diff --git a/config/testnet/config.yml b/config/testnet/config.yml
new file mode 100644
index 000000000..76b36cdf6
--- /dev/null
+++ b/config/testnet/config.yml
@@ -0,0 +1,52 @@
+logger:
+ level: info
+
+morph:
+ rpc_endpoint:
+ - wss://rpc01.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc02.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc03.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc04.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc05.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc06.morph.testnet.frostfs.info:51331/ws
+ - wss://rpc07.morph.testnet.frostfs.info:51331/ws
+ dial_timeout: 20s
+
+contracts:
+ balance: e0420c216003747626670d1424569c17c79015bf
+ container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
+ netmap: d4b331639799e2958d4bc5b711b469d79de94e01
+
+node:
+ key: /node.key
+ attribute_0: Deployed:SelfHosted
+ attribute_1: User-Agent:FrostFS\/0.9999
+
+prometheus:
+ enabled: true
+ address: localhost:9090
+ shutdown_timeout: 15s
+
+storage:
+ shard_num: 1
+ shard:
+ 0:
+ metabase:
+ path: /storage/metabase
+ perm: 0777
+ blobstor:
+ - path: /storage/path/blobovnicza
+ type: blobovnicza
+ perm: 0600
+ opened_cache_capacity: 32
+ depth: 1
+ width: 1
+ - path: /storage/path/fstree
+ type: fstree
+ perm: 0600
+ depth: 4
+ writecache:
+ enabled: false
+ gc:
+ remover_batch_size: 100
+ remover_sleep_interval: 1m
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 000000000..47328c419
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,5 @@
+frostfs-node (0.0.1) stable; urgency=medium
+
+ * Initial package build
+
+ -- TrueCloudLab Tue, 25 Oct 2022 21:10:49 +0300
diff --git a/debian/clean b/debian/clean
new file mode 100644
index 000000000..44dc05e0a
--- /dev/null
+++ b/debian/clean
@@ -0,0 +1,2 @@
+man/
+debian/*.bash-completion
diff --git a/debian/control b/debian/control
new file mode 100644
index 000000000..f3f214bca
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,39 @@
+Source: frostfs-node
+Section: misc
+Priority: optional
+Maintainer: TrueCloudLab
+Build-Depends: debhelper-compat (= 13), dh-sequence-bash-completion, devscripts
+Standards-Version: 4.5.1
+Homepage: https://fs.neo.org/
+Vcs-Git: https://git.frostfs.info/TrueCloudLab/frostfs-node.git
+Vcs-Browser: https://git.frostfs.info/TrueCloudLab/frostfs-node
+
+Package: frostfs-storage
+Architecture: any
+Depends: ${misc:Depends}
+Description: FrostFS Storage node
+ FrostFS is a decentralized distributed object storage integrated with the NEO
+ Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care
+ of storing and distributing user's data. Any Neo user may participate in the
+ network and get paid for providing storage resources to other users or store
+ their data in FrostFS and pay a competitive price for it.
+
+Package: frostfs-ir
+Architecture: any
+Depends: ${misc:Depends}, frostfs-locode-db
+Description: FrostFS InnerRing node
+ FrostFS is a decentralized distributed object storage integrated with the NEO
+ Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care
+ of storing and distributing user's data. Any Neo user may participate in the
+ network and get paid for providing storage resources to other users or store
+ their data in FrostFS and pay a competitive price for it.
+
+Package: frostfs-cli
+Architecture: any
+Depends: ${misc:Depends}
+Description: CLI tools for FrostFS
+ FrostFS is a decentralized distributed object storage integrated with the NEO
+ Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care
+ of storing and distributing user's data. Any Neo user may participate in the
+ network and get paid for providing storage resources to other users or store
+ their data in FrostFS and pay a competitive price for it.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 000000000..61dab665d
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,23 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: frostfs-node
+Upstream-Contact: tech@frostfs.info
+Source: https://git.frostfs.info/TrueCloudLab/frostfs-node
+
+Files: *
+Copyright: 2022-2023 TrueCloudLab (@TrueCloudLab), contributors of FrostFS project
+ 2018-2022 NeoSPCC (@nspcc-dev), contributors of NeoFS project
+ (https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/CREDITS.md)
+
+License: GPL-3
+ This program is free software: you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; version 3.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program or at /usr/share/common-licenses/GPL-3
+ If not, see <https://www.gnu.org/licenses/>.
diff --git a/debian/frostfs-cli.docs b/debian/frostfs-cli.docs
new file mode 100644
index 000000000..58d4559cc
--- /dev/null
+++ b/debian/frostfs-cli.docs
@@ -0,0 +1,4 @@
+CONTRIBUTING.md
+CREDITS.md
+README.md
+cmd/frostfs-adm/docs
diff --git a/debian/frostfs-cli.install b/debian/frostfs-cli.install
new file mode 100644
index 000000000..93025187b
--- /dev/null
+++ b/debian/frostfs-cli.install
@@ -0,0 +1,3 @@
+bin/frostfs-adm usr/bin
+bin/frostfs-cli usr/bin
+bin/frostfs-lens usr/bin
diff --git a/debian/frostfs-cli.manpages b/debian/frostfs-cli.manpages
new file mode 100644
index 000000000..85c5e001d
--- /dev/null
+++ b/debian/frostfs-cli.manpages
@@ -0,0 +1 @@
+man/*
diff --git a/debian/frostfs-ir.dirs b/debian/frostfs-ir.dirs
new file mode 100644
index 000000000..90da8fd27
--- /dev/null
+++ b/debian/frostfs-ir.dirs
@@ -0,0 +1,2 @@
+/etc/frostfs/ir
+/var/lib/frostfs/ir
diff --git a/debian/frostfs-ir.docs b/debian/frostfs-ir.docs
new file mode 100644
index 000000000..38b0cef26
--- /dev/null
+++ b/debian/frostfs-ir.docs
@@ -0,0 +1,3 @@
+CONTRIBUTING.md
+CREDITS.md
+README.md
diff --git a/debian/frostfs-ir.install b/debian/frostfs-ir.install
new file mode 100644
index 000000000..e052f5434
--- /dev/null
+++ b/debian/frostfs-ir.install
@@ -0,0 +1 @@
+bin/frostfs-ir usr/bin
diff --git a/debian/frostfs-ir.postinst b/debian/frostfs-ir.postinst
new file mode 100755
index 000000000..eb9d381c9
--- /dev/null
+++ b/debian/frostfs-ir.postinst
@@ -0,0 +1,51 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `configure'
+# * `abort-upgrade'
+# * `abort-remove' `in-favour'
+#
+# * `abort-remove'
+# * `abort-deconfigure' `in-favour'
+# `removing'
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+case "$1" in
+ configure)
+ USERNAME=ir
+ id -u frostfs-ir >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/ir --system -M -U -c "FrostFS InnerRing node" frostfs-ir
+ if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
+ chmod -f 0750 /etc/frostfs/$USERNAME
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yml
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/control.yml
+ chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true
+ chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true
+ fi
+ USERDIR="$(getent passwd frostfs-$USERNAME | cut -d: -f6)"
+ if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then
+ chown -f frostfs-$USERNAME: "$USERDIR"
+ fi
+ ;;
+
+ abort-upgrade|abort-remove|abort-deconfigure)
+ ;;
+
+ *)
+ echo "postinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-ir.postrm b/debian/frostfs-ir.postrm
new file mode 100755
index 000000000..cbb7db2f2
--- /dev/null
+++ b/debian/frostfs-ir.postrm
@@ -0,0 +1,40 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `remove'
+# * `purge'
+# * `upgrade'
+# * `failed-upgrade'
+# * `abort-install'
+# * `abort-install'
+# * `abort-upgrade'
+# * `disappear'
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ purge)
+ rm -rf /var/lib/frostfs/ir/*
+ ;;
+
+ remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
+ ;;
+
+ *)
+ echo "postrm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-ir.preinst b/debian/frostfs-ir.preinst
new file mode 100755
index 000000000..37f952537
--- /dev/null
+++ b/debian/frostfs-ir.preinst
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `install'
+# * `install' <old-version>
+# * `upgrade'
+# * `abort-upgrade'
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-ir.prerm b/debian/frostfs-ir.prerm
new file mode 100755
index 000000000..0da369d75
--- /dev/null
+++ b/debian/frostfs-ir.prerm
@@ -0,0 +1,37 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `remove'
+# * `upgrade'
+# * `failed-upgrade'
+# * <conflictor's-prerm> `remove' `in-favour' <package> <new-version>
+# * <deconfigured's-prerm> `deconfigure' `in-favour' <package-being-installed>
+#   <version> `removing' <conflicting-package> <version>
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ remove|upgrade|deconfigure)
+ ;;
+
+ failed-upgrade)
+ ;;
+
+ *)
+ echo "prerm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-ir.service b/debian/frostfs-ir.service
new file mode 100644
index 000000000..b59295ac5
--- /dev/null
+++ b/debian/frostfs-ir.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=FrostFS InnerRing node
+Requires=network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/frostfs-ir --config /etc/frostfs/ir/config.yml
+User=frostfs-ir
+Group=frostfs-ir
+WorkingDirectory=/var/lib/frostfs/ir
+Restart=always
+RestartSec=5
+PrivateTmp=true
+
+[Install]
+WantedBy=multi-user.target
diff --git a/debian/frostfs-storage.dirs b/debian/frostfs-storage.dirs
new file mode 100644
index 000000000..4142145ee
--- /dev/null
+++ b/debian/frostfs-storage.dirs
@@ -0,0 +1,3 @@
+/etc/frostfs/storage
+/srv/frostfs
+/var/lib/frostfs/storage
diff --git a/debian/frostfs-storage.docs b/debian/frostfs-storage.docs
new file mode 100644
index 000000000..cd1f5f23f
--- /dev/null
+++ b/debian/frostfs-storage.docs
@@ -0,0 +1,4 @@
+docs/storage-node-configuration.md
+CONTRIBUTING.md
+CREDITS.md
+README.md
diff --git a/debian/frostfs-storage.install b/debian/frostfs-storage.install
new file mode 100644
index 000000000..670935e7b
--- /dev/null
+++ b/debian/frostfs-storage.install
@@ -0,0 +1 @@
+bin/frostfs-node usr/bin
diff --git a/debian/frostfs-storage.postinst b/debian/frostfs-storage.postinst
new file mode 100755
index 000000000..88fa53be5
--- /dev/null
+++ b/debian/frostfs-storage.postinst
@@ -0,0 +1,55 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `configure'
+# * `abort-upgrade'
+# * <conflictor's-postinst> `abort-remove' `in-favour' <package>
+#   <new-version>
+# * <postinst> `abort-remove'
+# * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
+#   <failed-install-package> <version> `removing' <conflicting-package> <version>
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+case "$1" in
+ configure)
+ USERNAME=storage
+ id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/$USERNAME --system -M -U -c "FrostFS Storage node" frostfs-$USERNAME
+ if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
+ chmod -f 0750 /etc/frostfs/$USERNAME
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yml
+ chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/control.yml
+ chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true
+ chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true
+ fi
+ USERDIR=$(getent passwd frostfs-$USERNAME | cut -d: -f6)
+ if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then
+ chown -f frostfs-$USERNAME: "$USERDIR"
+ fi
+ USERDIR=/srv/frostfs
+ if ! dpkg-statoverride --list frostfs-$USERDIR >/dev/null; then
+ chown -f frostfs-$USERNAME: $USERDIR
+ fi
+ ;;
+
+ abort-upgrade|abort-remove|abort-deconfigure)
+ ;;
+
+ *)
+ echo "postinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-storage.postrm b/debian/frostfs-storage.postrm
new file mode 100755
index 000000000..d9c8c9656
--- /dev/null
+++ b/debian/frostfs-storage.postrm
@@ -0,0 +1,40 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `remove'
+# * `purge'
+# * `upgrade'
+# * `failed-upgrade'
+# * `abort-install'
+# * `abort-install' <old-version>
+# * `abort-upgrade'
+# * `disappear'
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ purge)
+ rm -rf /var/lib/frostfs/storage/*
+ ;;
+
+ remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
+ ;;
+
+ *)
+ echo "postrm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-storage.preinst b/debian/frostfs-storage.preinst
new file mode 100755
index 000000000..37f952537
--- /dev/null
+++ b/debian/frostfs-storage.preinst
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `install'
+# * `install' <old-version>
+# * `upgrade'
+# * `abort-upgrade'
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-storage.prerm b/debian/frostfs-storage.prerm
new file mode 100755
index 000000000..0da369d75
--- /dev/null
+++ b/debian/frostfs-storage.prerm
@@ -0,0 +1,37 @@
+#!/bin/sh
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * `remove'
+# * `upgrade'
+# * `failed-upgrade'
+# * <conflictor's-prerm> `remove' `in-favour' <package> <new-version>
+# * <deconfigured's-prerm> `deconfigure' `in-favour' <package-being-installed>
+#   <version> `removing' <conflicting-package> <version>
+#
+# for details, see https://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ remove|upgrade|deconfigure)
+ ;;
+
+ failed-upgrade)
+ ;;
+
+ *)
+ echo "prerm called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/frostfs-storage.service b/debian/frostfs-storage.service
new file mode 100644
index 000000000..6e7d1dd5d
--- /dev/null
+++ b/debian/frostfs-storage.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=FrostFS Storage node
+Requires=network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/frostfs-node --config /etc/frostfs/storage/config.yml
+User=frostfs-storage
+Group=frostfs-storage
+WorkingDirectory=/srv/frostfs
+Restart=always
+RestartSec=5
+PrivateTmp=true
+
+[Install]
+WantedBy=multi-user.target
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 000000000..0dd8ee399
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,40 @@
+#!/usr/bin/make -f
+
+# Do not try to strip Go binaries
+export DEB_BUILD_OPTIONS := nostrip
+
+%:
+ dh $@ --with bash-completion
+
+override_dh_auto_test:
+
+override_dh_auto_install:
+ echo $(DEB_BUILD_OPTIONS)
+ dh_auto_install
+
+ bin/frostfs-adm gendoc --type man man/
+ bin/frostfs-cli gendoc --type man man/
+
+ bin/frostfs-adm completion bash > debian/frostfs-adm.bash-completion
+ bin/frostfs-cli completion bash > debian/frostfs-cli.bash-completion
+ install -m 0755 -d debian/frostfs-cli/usr/share/fish/completions/
+ install -m 0755 -d debian/frostfs-cli/usr/share/zsh/vendor-completions/
+ bin/frostfs-adm completion fish > debian/frostfs-cli/usr/share/fish/completions/frostfs-adm.fish
+ bin/frostfs-adm completion zsh > debian/frostfs-cli/usr/share/zsh/vendor-completions/_frostfs-adm
+ bin/frostfs-cli completion fish > debian/frostfs-cli/usr/share/fish/completions/frostfs-cli.fish
+ bin/frostfs-cli completion zsh > debian/frostfs-cli/usr/share/zsh/vendor-completions/_frostfs-cli
+
+ install -T -m 0640 config/example/ir.yaml debian/frostfs-ir/etc/frostfs/ir/config.yml
+ install -T -m 0640 config/example/ir-control.yaml debian/frostfs-ir/etc/frostfs/ir/control.yml
+ install -T -m 0640 config/example/node.yaml debian/frostfs-storage/etc/frostfs/storage/config.yml
+ install -T -m 0640 config/example/node-control.yaml debian/frostfs-storage/etc/frostfs/storage/control.yml
+
+override_dh_installsystemd:
+ dh_installsystemd --no-enable --no-start --name=frostfs-ir
+ dh_installsystemd --no-enable --no-start --name=frostfs-storage
+
+override_dh_installchangelogs:
+ dh_installchangelogs -k CHANGELOG.md
+
+override_dh_installdocs:
+ dh_installdocs
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 000000000..163aaf8d8
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/dev/.vscode-example/launch.json b/dev/.vscode-example/launch.json
deleted file mode 100644
index b68ce4fa3..000000000
--- a/dev/.vscode-example/launch.json
+++ /dev/null
@@ -1,269 +0,0 @@
-{
- "version": "0.2.0",
- "configurations": [
- {
- "name": "IR",
- "type": "go",
- "request": "launch",
- "mode": "debug",
- "program": "cmd/frostfs-ir",
- "env": {
- "FROSTFS_IR_LOGGER_LEVEL":"info",
- "FROSTFS_IR_WALLET_PATH":"${workspaceFolder}/dev/ir/az.json",
- "FROSTFS_IR_WALLET_ADDRESS":"Nhfg3TbpwogLvDGVvAvqyThbsHgoSUKwtn",
- "FROSTFS_IR_WALLET_PASSWORD":"one",
- "FROSTFS_IR_WITHOUT_MAINNET":"true",
- "FROSTFS_IR_MORPH_ENDPOINT_CLIENT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
- "FROSTFS_IR_MORPH_VALIDATORS":"02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2",
- "FROSTFS_IR_TIMERS_EMIT":"50",
- "FROSTFS_IR_TIMERS_STOP_ESTIMATION_MUL":"1",
- "FROSTFS_IR_TIMERS_STOP_ESTIMATION_DIV":"4",
- "FROSTFS_IR_TIMERS_COLLECT_BASIC_INCOME_MUL":"1",
- "FROSTFS_IR_TIMERS_COLLECT_BASIC_INCOME_DIV":"2",
- "FROSTFS_IR_TIMERS_DISTRIBUTE_BASIC_INCOME_MUL":"3",
- "FROSTFS_IR_TIMERS_DISTRIBUTE_BASIC_INCOME_DIV":"4",
- "FROSTFS_IR_EMIT_STORAGE_AMOUNT":"1000000000",
- "FROSTFS_IR_NETMAP_CLEANER_ENABLED":"true",
- "FROSTFS_IR_NETMAP_CLEANER_THRESHOLD":"3",
- "FROSTFS_IR_LOCODE_DB_PATH":"${workspaceFolder}/.cache/locode_db",
- "FROSTFS_IR_CONTROL_GRPC_ENDPOINT":"127.0.0.1:8090",
- "FROSTFS_IR_NODE_PERSISTENT_STATE_PATH":"${workspaceFolder}/.cache/state/.frostfs-ir-state"
- },
- "postDebugTask": "env-down"
- },
- {
- "name": "Storage node 1",
- "type": "go",
- "request": "launch",
- "mode": "debug",
- "program": "cmd/frostfs-node",
- "env": {
- "FROSTFS_LOGGER_LEVEL":"debug",
- "FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
- "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
- "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
- "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json",
- "FROSTFS_NODE_WALLET_PASSWORD":"",
- "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080",
- "FROSTFS_GRPC_0_ENDPOINT":"127.0.0.1:8080",
- "FROSTFS_CONTROL_GRPC_ENDPOINT":"127.0.0.1:8081",
- "FROSTFS_CONTROL_AUTHORIZED_KEYS":"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a",
- "FROSTFS_NODE_ATTRIBUTE_0":"User-Agent:FrostFS/dev",
- "FROSTFS_NODE_ATTRIBUTE_1":"UN-LOCODE:RU MOW",
- "FROSTFS_NODE_PERSISTENT_STATE_PATH":"${workspaceFolder}/.cache/state/.frostfs-node-s1-state",
- "FROSTFS_TREE_ENABLED":"true",
- "FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME":"10",
- "FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED":"true",
- "FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s1/wc0",
- "FROSTFS_STORAGE_SHARD_0_METABASE_PATH":"${workspaceFolder}/.cache/storage/s1/meta0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE":"blobovnicza",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s1/blobovnicza0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH":"4",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE":"fstree",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s1/fstree0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_0_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s1/pilorama0",
- "FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED":"true",
- "FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s1/wc1",
- "FROSTFS_STORAGE_SHARD_1_METABASE_PATH":"${workspaceFolder}/.cache/storage/s1/meta1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_TYPE":"blobovnicza",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s1/blobovnicza1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH":"4",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE":"fstree",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s1/fstree1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s1/pilorama1",
- "FROSTFS_PROMETHEUS_ENABLED":"true",
- "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9090",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
- "FROSTFS_TRACING_ENABLED":"true",
- "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
- "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
- "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
- "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8080"
- },
- "postDebugTask": "env-down"
- },
- {
- "name": "Storage node 2",
- "type": "go",
- "request": "launch",
- "mode": "debug",
- "program": "cmd/frostfs-node",
- "env": {
- "FROSTFS_LOGGER_LEVEL":"debug",
- "FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
- "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
- "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
- "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json",
- "FROSTFS_NODE_WALLET_PASSWORD":"",
- "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082",
- "FROSTFS_GRPC_0_ENDPOINT":"127.0.0.1:8082",
- "FROSTFS_CONTROL_GRPC_ENDPOINT":"127.0.0.1:8083",
- "FROSTFS_CONTROL_AUTHORIZED_KEYS":"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a",
- "FROSTFS_NODE_ATTRIBUTE_0":"User-Agent:FrostFS/dev",
- "FROSTFS_NODE_ATTRIBUTE_1":"UN-LOCODE:RU MOW",
- "FROSTFS_NODE_PERSISTENT_STATE_PATH":"${workspaceFolder}/.cache/state/.frostfs-node-s2-state",
- "FROSTFS_TREE_ENABLED":"true",
- "FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME":"10",
- "FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED":"true",
- "FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s2/wc0",
- "FROSTFS_STORAGE_SHARD_0_METABASE_PATH":"${workspaceFolder}/.cache/storage/s2/meta0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE":"blobovnicza",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s2/blobovnicza0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH":"4",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE":"fstree",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s2/fstree0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_0_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s2/pilorama0",
- "FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED":"true",
- "FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s2/wc1",
- "FROSTFS_STORAGE_SHARD_1_METABASE_PATH":"${workspaceFolder}/.cache/storage/s2/meta1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_TYPE":"blobovnicza",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s2/blobovnicza1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH":"4",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE":"fstree",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s2/fstree1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s2/pilorama1",
- "FROSTFS_PROMETHEUS_ENABLED":"true",
- "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9091",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
- "FROSTFS_TRACING_ENABLED":"true",
- "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
- "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
- "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
- "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8082"
- },
- "postDebugTask": "env-down"
- },
- {
- "name": "Storage node 3",
- "type": "go",
- "request": "launch",
- "mode": "debug",
- "program": "cmd/frostfs-node",
- "env": {
- "FROSTFS_LOGGER_LEVEL":"debug",
- "FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
- "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
- "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
- "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json",
- "FROSTFS_NODE_WALLET_PASSWORD":"",
- "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084",
- "FROSTFS_GRPC_0_ENDPOINT":"127.0.0.1:8084",
- "FROSTFS_CONTROL_GRPC_ENDPOINT":"127.0.0.1:8085",
- "FROSTFS_CONTROL_AUTHORIZED_KEYS":"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a",
- "FROSTFS_NODE_ATTRIBUTE_0":"User-Agent:FrostFS/dev",
- "FROSTFS_NODE_ATTRIBUTE_1":"UN-LOCODE:RU MOW",
- "FROSTFS_NODE_PERSISTENT_STATE_PATH":"${workspaceFolder}/.cache/state/.frostfs-node-s3-state",
- "FROSTFS_TREE_ENABLED":"true",
- "FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME":"10",
- "FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED":"true",
- "FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s3/wc0",
- "FROSTFS_STORAGE_SHARD_0_METABASE_PATH":"${workspaceFolder}/.cache/storage/s3/meta0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE":"blobovnicza",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s3/blobovnicza0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH":"4",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE":"fstree",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s3/fstree0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_0_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s3/pilorama0",
- "FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED":"true",
- "FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s3/wc1",
- "FROSTFS_STORAGE_SHARD_1_METABASE_PATH":"${workspaceFolder}/.cache/storage/s3/meta1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_TYPE":"blobovnicza",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s3/blobovnicza1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH":"4",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE":"fstree",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s3/fstree1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s3/pilorama1",
- "FROSTFS_PROMETHEUS_ENABLED":"true",
- "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9092",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
- "FROSTFS_TRACING_ENABLED":"true",
- "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
- "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
- "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
- "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8084"
- },
- "postDebugTask": "env-down"
- },
- {
- "name": "Storage node 4",
- "type": "go",
- "request": "launch",
- "mode": "debug",
- "program": "cmd/frostfs-node",
- "env": {
- "FROSTFS_LOGGER_LEVEL":"debug",
- "FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
- "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
- "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
- "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json",
- "FROSTFS_NODE_WALLET_PASSWORD":"",
- "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086",
- "FROSTFS_GRPC_0_ENDPOINT":"127.0.0.1:8086",
- "FROSTFS_CONTROL_GRPC_ENDPOINT":"127.0.0.1:8087",
- "FROSTFS_CONTROL_AUTHORIZED_KEYS":"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a",
- "FROSTFS_NODE_ATTRIBUTE_0":"User-Agent:FrostFS/dev",
- "FROSTFS_NODE_ATTRIBUTE_1":"UN-LOCODE:RU MOW",
- "FROSTFS_NODE_PERSISTENT_STATE_PATH":"${workspaceFolder}/.cache/state/.frostfs-node-s4-state",
- "FROSTFS_TREE_ENABLED":"true",
- "FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME":"10",
- "FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED":"true",
- "FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s4/wc0",
- "FROSTFS_STORAGE_SHARD_0_METABASE_PATH":"${workspaceFolder}/.cache/storage/s4/meta0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE":"blobovnicza",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s4/blobovnicza0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH":"4",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE":"fstree",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s4/fstree0",
- "FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_0_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s4/pilorama0",
- "FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED":"true",
- "FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH":"${workspaceFolder}/.cache/storage/s4/wc1",
- "FROSTFS_STORAGE_SHARD_1_METABASE_PATH":"${workspaceFolder}/.cache/storage/s4/meta1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_TYPE":"blobovnicza",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_PATH":"${workspaceFolder}/.cache/storage/s4/blobovnicza1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH":"4",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE":"fstree",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH":"${workspaceFolder}/.cache/storage/s4/fstree1",
- "FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_DEPTH":"2",
- "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s4/pilorama1",
- "FROSTFS_PROMETHEUS_ENABLED":"true",
- "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9093",
- "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s",
- "FROSTFS_TRACING_ENABLED":"true",
- "FROSTFS_TRACING_EXPORTER":"otlp_grpc",
- "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317",
- "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip",
- "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8086"
- },
- "postDebugTask": "env-down"
- }
- ],
- "compounds": [
- {
- "name": "IR+Storage node",
- "configurations": ["IR", "Storage node 1"],
- "preLaunchTask": "env-up",
- "stopAll": true
- },
- {
- "name": "IR + 4 storage nodes",
- "configurations": ["IR", "Storage node 1", "Storage node 2", "Storage node 3", "Storage node 4"],
- "preLaunchTask": "env-up",
- "stopAll": true
- }
- ]
-}
diff --git a/dev/.vscode-example/tasks.json b/dev/.vscode-example/tasks.json
deleted file mode 100644
index 0ccd9d110..000000000
--- a/dev/.vscode-example/tasks.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "version": "2.0.0",
- "tasks": [
- {
- "type": "shell",
- "label": "env-up",
- "command": "make env-up",
- "group": "build",
- "detail": "Up debug environment"
- },
- {
- "type": "shell",
- "label": "env-down",
- "command": "make env-down",
- "group": "build",
- "detail": "Down debug environment"
- }
- ]
-}
diff --git a/dev/adm/frostfs-adm.yml b/dev/adm/frostfs-adm.yml
deleted file mode 100644
index e75cc27f0..000000000
--- a/dev/adm/frostfs-adm.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-rpc-endpoint: http://127.0.0.1:30333
-alphabet-wallets: ./dev/ir
-network:
- max_object_size: 5242880
- epoch_duration: 60
- basic_income_rate: 100000000
- homomorphic_hash_disabled: false
- maintenance_mode_allowed: true
- max_ec_data_count: 12
- max_ec_parity_count: 4
- fee:
- audit: 10000
- candidate: 10000000000
- container: 0
- container_alias: 0
- withdraw: 100000000
-credentials:
- az: "one"
- contract: "one"
diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml
deleted file mode 100644
index 40ed35aeb..000000000
--- a/dev/docker-compose.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-
-version: "2.4"
-services:
- neo-go:
- image: nspccdev/neo-go:0.106.0
- container_name: neo-go
- command: ["node", "--config-path", "/config", "--privnet", "--debug"]
- stop_signal: SIGKILL
- ports:
- - 30333:30333
- volumes:
- - ./neo-go/protocol.privnet.yml:/config/protocol.privnet.yml
- - ./neo-go/node-wallet.json:/wallets/node-wallet.json
- - ./neo-go/config.yml:/wallets/config.yml
- - ./neo-go/wallet.json:/wallets/wallet.json
- jaeger:
- image: jaegertracing/all-in-one:latest
- container_name: jaeger
- ports:
- - '4317:4317' #OTLP over gRPC
- - '4318:4318' #OTLP over HTTP
- - '16686:16686' #frontend
- stop_signal: SIGKILL
- environment:
- - COLLECTOR_OTLP_ENABLED=true
- - SPAN_STORAGE_TYPE=badger
- - BADGER_EPHEMERAL=true
diff --git a/dev/empty_pass.yml b/dev/empty_pass.yml
deleted file mode 100644
index 1986cf9e4..000000000
--- a/dev/empty_pass.yml
+++ /dev/null
@@ -1 +0,0 @@
-password: ""
diff --git a/dev/ir/az.json b/dev/ir/az.json
deleted file mode 100644
index 8e88b432c..000000000
--- a/dev/ir/az.json
+++ /dev/null
@@ -1,68 +0,0 @@
-{
- "version": "3.0",
- "name":null,
- "accounts": [
- {
- "address": "Nhfg3TbpwogLvDGVvAvqyThbsHgoSUKwtn",
- "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY",
- "label": "single",
- "contract": {
- "script": "DCECs2Ir9AF73+MXxYrtX0x1PyBrfbiWBG+n13S7xL9/jcJBVuezJw==",
- "parameters": [
- {
- "name": "parameter0",
- "type": "Signature"
- }
- ],
- "deployed": false
- },
- "lock": false,
- "extra":null,
- "isDefault": false
- },
- {
- "address": "NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP",
- "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY",
- "label": "consensus",
- "contract": {
- "script": "EQwhArNiK/QBe9/jF8WK7V9MdT8ga324lgRvp9d0u8S/f43CEUGe0Nw6",
- "parameters": [
- {
- "name": "parameter0",
- "type": "Signature"
- }
- ],
- "deployed": false
- },
- "lock": false,
- "extra":null,
- "isDefault": false
- },
- {
- "address": "NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP",
- "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY",
- "label": "committee",
- "contract": {
- "script": "EQwhArNiK/QBe9/jF8WK7V9MdT8ga324lgRvp9d0u8S/f43CEUGe0Nw6",
- "parameters": [
- {
- "name": "parameter0",
- "type": "Signature"
- }
- ],
- "deployed": false
- },
- "lock": false,
- "extra":null,
- "isDefault": true
- }
- ],
- "scrypt": {
- "n": 16384,
- "r": 8,
- "p": 8
- },
- "extra": {
- "Tokens": null
- }
-}
diff --git a/dev/ir/contract.json b/dev/ir/contract.json
deleted file mode 100644
index 310b77bd7..000000000
--- a/dev/ir/contract.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "version": "3.0",
- "accounts": [
- {
- "address": "Nhfg3TbpwogLvDGVvAvqyThbsHgoSUKwtn",
- "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY",
- "label": "",
- "contract": {
- "script": "DCECs2Ir9AF73+MXxYrtX0x1PyBrfbiWBG+n13S7xL9/jcJBVuezJw==",
- "parameters": [
- {
- "name": "parameter0",
- "type": "Signature"
- }
- ],
- "deployed": false
- },
- "lock": false,
- "isDefault": false
- }
- ],
- "scrypt": {
- "n": 16384,
- "r": 8,
- "p": 8
- },
- "extra": {
- "Tokens": null
- }
-}
diff --git a/dev/neo-go/config.yml b/dev/neo-go/config.yml
deleted file mode 100644
index 7b4bb29d7..000000000
--- a/dev/neo-go/config.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-
-Path: "/wallets/node-wallet.json"
-Password: "one"
diff --git a/dev/neo-go/node-wallet.json b/dev/neo-go/node-wallet.json
deleted file mode 100644
index 8e88b432c..000000000
--- a/dev/neo-go/node-wallet.json
+++ /dev/null
@@ -1,68 +0,0 @@
-{
- "version": "3.0",
- "name":null,
- "accounts": [
- {
- "address": "Nhfg3TbpwogLvDGVvAvqyThbsHgoSUKwtn",
- "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY",
- "label": "single",
- "contract": {
- "script": "DCECs2Ir9AF73+MXxYrtX0x1PyBrfbiWBG+n13S7xL9/jcJBVuezJw==",
- "parameters": [
- {
- "name": "parameter0",
- "type": "Signature"
- }
- ],
- "deployed": false
- },
- "lock": false,
- "extra":null,
- "isDefault": false
- },
- {
- "address": "NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP",
- "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY",
- "label": "consensus",
- "contract": {
- "script": "EQwhArNiK/QBe9/jF8WK7V9MdT8ga324lgRvp9d0u8S/f43CEUGe0Nw6",
- "parameters": [
- {
- "name": "parameter0",
- "type": "Signature"
- }
- ],
- "deployed": false
- },
- "lock": false,
- "extra":null,
- "isDefault": false
- },
- {
- "address": "NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP",
- "key": "6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY",
- "label": "committee",
- "contract": {
- "script": "EQwhArNiK/QBe9/jF8WK7V9MdT8ga324lgRvp9d0u8S/f43CEUGe0Nw6",
- "parameters": [
- {
- "name": "parameter0",
- "type": "Signature"
- }
- ],
- "deployed": false
- },
- "lock": false,
- "extra":null,
- "isDefault": true
- }
- ],
- "scrypt": {
- "n": 16384,
- "r": 8,
- "p": 8
- },
- "extra": {
- "Tokens": null
- }
-}
diff --git a/dev/neo-go/protocol.privnet.yml b/dev/neo-go/protocol.privnet.yml
deleted file mode 100644
index 8aaf774a3..000000000
--- a/dev/neo-go/protocol.privnet.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-ProtocolConfiguration:
- Magic: 15405
- MaxTraceableBlocks: 200000
- TimePerBlock: 1s
- MemPoolSize: 50000
- StandbyCommittee:
- - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2
- ValidatorsCount: 1
- SeedList:
- - 0.0.0.0:20333
- VerifyTransactions: true
- StateRootInHeader: true
- P2PSigExtensions: true
-
-ApplicationConfiguration:
- SkipBlockVerification: false
- DBConfiguration:
- Type: "boltdb"
- BoltDBOptions:
- FilePath: "./db/morph.bolt"
- P2P:
- Addresses:
- - ":20333"
- DialTimeout: 3s
- ProtoTickInterval: 2s
- PingInterval: 30s
- PingTimeout: 90s
- MaxPeers: 10
- AttemptConnPeers: 5
- MinPeers: 0
- Relay: true
- Consensus:
- Enabled: true
- UnlockWallet:
- Path: "./wallets/node-wallet.json"
- Password: "one"
- RPC:
- Addresses:
- - "0.0.0.0:30333"
- Enabled: true
- SessionEnabled: true
- EnableCORSWorkaround: false
- MaxGasInvoke: 100
- P2PNotary:
- Enabled: true
- UnlockWallet:
- Path: "./wallets/node-wallet.json"
- Password: "one"
diff --git a/dev/neo-go/wallet.json b/dev/neo-go/wallet.json
deleted file mode 100644
index ce68d604c..000000000
--- a/dev/neo-go/wallet.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "version": "3.0",
- "accounts": [
- {
- "address": "NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM",
- "key": "6PYP7YrwGnLuu4WYQbEe3WJiC44aKmqwqawLsp7H3oh5vocS9xTv2ZfTp3",
- "label": "",
- "contract": {
- "script": "DCEDGmxvu98CyjUXRfqGubpalFLXhaxPf8K3VIyipGxPz0pBVuezJw==",
- "parameters": [
- {
- "name": "parameter0",
- "type": "Signature"
- }
- ],
- "deployed": false
- },
- "lock": false,
- "isdefault": false
- }
- ],
- "scrypt": {
- "n": 16384,
- "r": 8,
- "p": 8
- },
- "extra": {
- "Tokens": null
- }
-}
diff --git a/dev/storage/wallet01.json b/dev/storage/wallet01.json
deleted file mode 100644
index e5b6bb371..000000000
--- a/dev/storage/wallet01.json
+++ /dev/null
@@ -1,32 +0,0 @@
-{
- "version":"3.0",
- "accounts":[
- {
- "address":"NejLbQpojKJWec4NQRMBhzsrmCyhXfGJJe",
- "key":"6PYSS8ccmBcttfcw2YJh8VcNSoeQbQLuJLQ7HoKeYF5roRmGs9LUvmKcWz",
- "label":"",
- "contract":{
- "script":"DCECK7QEHFDWB/+HHex+TNd3g4jg6mhJ2EzL2aqPMuFqgTFBVuezJw==",
- "parameters":[
- {
- "name":"parameter0",
- "type":"Signature"
- }
- ],
- "deployed":false
- },
- "lock":false,
- "extra":null,
- "isDefault":true
- }
- ],
- "name":null,
- "scrypt":{
- "n":16384,
- "r":8,
- "p":8
- },
- "extra":{
- "Tokens":null
- }
- }
diff --git a/dev/storage/wallet02.json b/dev/storage/wallet02.json
deleted file mode 100644
index 9c073deef..000000000
--- a/dev/storage/wallet02.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "version":"3.0",
- "accounts":[
- {
- "address":"NVXXy3hNTvwVEZa2dAibALyJB3Q86aiHvL",
- "key":"6PYXd9hxMYfaCkgeZp3q1RoMB921RQFkRxYftcacTJ2S7MUwnivrxi6Yk5",
- "label":"",
- "contract":{
- "script":"DCED/2W2rnkTSk3OnQ0504Uem6tO6Xq/hugeHFu8UM0oJq5BVuezJw==",
- "parameters":[
- {
- "name":"parameter0",
- "type":"Signature"
- }
- ],
- "deployed":false
- },
- "lock":false,
- "isDefault":false
- }
- ],
- "scrypt":{
- "n":16384,
- "r":8,
- "p":8
- },
- "extra":{
- "Tokens":null
- }
- }
diff --git a/dev/storage/wallet03.json b/dev/storage/wallet03.json
deleted file mode 100644
index c054a3160..000000000
--- a/dev/storage/wallet03.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "version":"3.0",
- "accounts":[
- {
- "address":"NPTmih9X14Y7xLvmD6RVtDHdH1Y9qJwoTe",
- "key":"6PYXNeQzge9fWztVnWYRbr5Mh9q1y4npKVARHYGb484Hct1iNd3vXGR1kk",
- "label":"",
- "contract":{
- "script":"DCECrJIM198LYbKJBy5rlG4tpOGjG5qxxiG7R14w+kqxAsNBVuezJw==",
- "parameters":[
- {
- "name":"parameter0",
- "type":"Signature"
- }
- ],
- "deployed":false
- },
- "lock":false,
- "isDefault":false
- }
- ],
- "scrypt":{
- "n":16384,
- "r":8,
- "p":8
- },
- "extra":{
- "Tokens":null
- }
- }
diff --git a/dev/storage/wallet04.json b/dev/storage/wallet04.json
deleted file mode 100644
index cb4676df6..000000000
--- a/dev/storage/wallet04.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "version":"3.0",
- "accounts":[
- {
- "address":"Ne2DAQbWvP1s7TbtFc7BStKMnjKJdBaVRm",
- "key":"6PYWCsGWx8uSVYK94tvK7Ccit8x8Z3f3dHADTFTgLhT9NBXTBqBECL8AyC",
- "label":"",
- "contract":{
- "script":"DCEDjIYpWeVrQ+IPeRh8T+ngvHyMZsFgPmzw7H+Hq2sI3DVBVuezJw==",
- "parameters":[
- {
- "name":"parameter0",
- "type":"Signature"
- }
- ],
- "deployed":false
- },
- "lock":false,
- "isDefault":false
- }
- ],
- "scrypt":{
- "n":16384,
- "r":8,
- "p":8
- },
- "extra":{
- "Tokens":null
- }
- }
diff --git a/dev/wallet.json b/dev/wallet.json
deleted file mode 100644
index ce68d604c..000000000
--- a/dev/wallet.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "version": "3.0",
- "accounts": [
- {
- "address": "NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM",
- "key": "6PYP7YrwGnLuu4WYQbEe3WJiC44aKmqwqawLsp7H3oh5vocS9xTv2ZfTp3",
- "label": "",
- "contract": {
- "script": "DCEDGmxvu98CyjUXRfqGubpalFLXhaxPf8K3VIyipGxPz0pBVuezJw==",
- "parameters": [
- {
- "name": "parameter0",
- "type": "Signature"
- }
- ],
- "deployed": false
- },
- "lock": false,
- "isdefault": false
- }
- ],
- "scrypt": {
- "n": 16384,
- "r": 8,
- "p": 8
- },
- "extra": {
- "Tokens": null
- }
-}
diff --git a/docs/authentication.md b/docs/authentication.md
deleted file mode 100644
index 3fe5ca512..000000000
--- a/docs/authentication.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Authentication and signatures
-
-## General overview
-
-
-
-## Signatures
-
-Every message in the FrostFS network is signed.
-Each signature consists of:
-1. Scheme
-2. Public key
-3. Signature
-
-If signature check fails, operation is aborted and the error is returned to the user.
-
-### Schemes
-Currently, 3 schemes are defined in the [frostfs-api](https://git.frostfs.info/TrueCloudLab/frostfs-api/src/commit/4bae9dd78abcf1a358a65a45fe7303e37fd98099/refs/types.proto#L105):
-
-#### ECDSA
-
-Defined in section 6 of [FIPS 186](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf).
-Implemented in the Go stdlib.
-This is the primary algorithm used to sign and verify requests.
-The hash algorithm used is SHA-512.
-
-#### RFC6979
-
-[RFC 6979](https://www.rfc-editor.org/rfc/rfc6979) defines deterministic algorithm for ECDSA signatures.
-It it used primarily used by neo-go and allows us to perform signature checks inside the contract, such as for container.
-The hash algorithm used is SHA-256
-
-### Public key
-
-ECDSA public key corresponding to the private key being used to sign a message.
-It is the primary user identity and is used to determine the request originator.
-
-## Tokens
-
-Generally, the request owner, i.e. an account all access control checks are applied to
-is taken from the request signature.
-However, session and bearer tokens can alter authentication process by making "effective" request owner differ from the actual one.
-The general scheme is given by the following picture:
-
-
-
-It is important to note, that the token is only valid when the request signature corresponds to the actor token is issued to.
-
-### Session token
-
-Session token can override the rules of determining request owner.
-It is defined in the [frostfs-api](https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/session/types.proto#L89).
-If user A signs a session token for user B, then user B can sign request with its own key, while the node will still process the request as if it was originated from user A.
-This is used, for example, when putting objects in system:
-1. User creates a session with node, recevieving session token.
-2. User signs session token for a freshly generated key, stored on a storage node.
-3. User sends raw stream of bytes, while the node signs created objects with the session key. This way other nodes can validate the object owned by user, even though it is signed by a different key.
-
-Session token may have some restrictions:
-1. Lifetime, effectively an epoch after which it becomes invalid.
-2. Set of operations it applies to.
-3. The entity it is given to. This is provided in `session_key` field containing the public key.
-
-### Bearer token
-
-Bearer token is generally used for access control but can also affect authentication if `allow_impersonate` flag is set. With this flag it behaves similarly to session token.
-
-## FrostFS ID
-
-## APE
diff --git a/docs/building-deb-package.md b/docs/building-deb-package.md
new file mode 100644
index 000000000..26a77a27f
--- /dev/null
+++ b/docs/building-deb-package.md
@@ -0,0 +1,46 @@
+# Building Debian package on host
+
+## Prerequisites
+
+For now, we're assuming building for Debian 11 (stable) x86_64.
+
+Go version 1.18.4 or later should already be installed, i.e. this runs
+successfully:
+
+* `make all`
+
+## Installing packaging dependencies
+
+```shell
+$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts
+```
+
+Warning: the number of packages installed is pretty large, considering dependencies.
+
+## Package building
+
+```shell
+$ make debpackage
+```
+
+## Leftovers cleaning
+
+```shell
+$ make debclean
+```
+or
+```shell
+$ dh clean
+```
+
+# Package versioning
+
+By default, package version is based on product version and may also contain git
+tags and hashes.
+
+Package version could be overwritten by setting `PKG_VERSION` variable before
+build, Debian package versioning rules should be respected.
+
+```shell
+$ PKG_VERSION=0.32.0 make debpackage
+```
diff --git a/docs/epoch.md b/docs/epoch.md
deleted file mode 100644
index 2f33dcfd5..000000000
--- a/docs/epoch.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Epoch
-
-The main purpose of the `epoch` in `frostfs` environment is to manipulate `netmap`.
-Each new epoch, `ir` service trigger revision content of the `netmap` by adding or removing nodes to or from it.
-`node` service trigger few internal processes each new epoch - for example, running GC.
-Epoch also used in an object lifecycle.
-
-At the startup, `ir` service initializes an epoch timer which handles new epoch tick.
-Epoch timer is a block timer - which means that this timer ticks each block or set of blocks.
-The epoch duration stores in the configurable parameter `EpochDuration` in the blockchain.
-It is possible to get it via `frostfs-adm`:
-```shell
-> frostfs-adm morph dump-config -c config.yml -r http://morph-chain.frostfs.devenv:30333
-...
-EpochDuration: 240 (int)
-...
->
-```
-Once epoch timer ticks, `ir` service call method [NewEpoch](https://git.frostfs.info/TrueCloudLab/frostfs-contract/src/commit/a1b61d3949581f4d65b0d32a33d98ba9c193dc2a/netmap/netmap_contract.go#L238)
-of the `netmap` contract. Each `ir` instance can do this at the same time, but it is not an issue,
-because multiple call of this method with the same set of parameters will give us the same result.
-
-Utility `frostfs-adm` have a command to trigger new epoch:
-```shell
-> frostfs-adm morph force-new-epoch -c config.yml -r http://morph-chain.frostfs.devenv:30333
-```
-Command goes directly to the `netmap` contract and call method `NewEpoch`.
-Method checks alphabet witness and stores candidates nodes which are not in the `OFFLINE` state as a current netmap.
-Then executes method `NewEpoch` in `balance` and `container` contracts.
-At the end it produces notification `NewEpoch` which is handled by `node` and `ir` services.
-
-`ir` handler for `NewEpoch` updates internal state of the netmap, if it is necessary, updates state of the nodes or
-marks for exclusion from netmap in the blockchain.
-
-`node` handler for `NewEpoch` executes method `addPeer` of the `netmap` contract.
-This method do nothing, but produces notification which handled by `ir` service.
-`ir` in handler for `AddPeer` may update node state in the netmap if it is necessary.
-
-At the startup, node bootstraps with state `ONLINE`. From the online state, it is possible to move to `MAINTENANCE` or `OFFLINE`.
-Node moved to `OFFLINE` state automatically, when there is no bootstrap request from it for a number of epochs.
-This number stored in the `ir` config `netmap_cleaner.threshold`.
-From `OFFLINE` state node, once it bootstrapped, moves to `ONLINE`.
-`MAINTENANCE` state persists even if node rebooted or unavailable for a few epochs.
diff --git a/docs/evacuation.md b/docs/evacuation.md
index d47d56d15..9bfa0e214 100644
--- a/docs/evacuation.md
+++ b/docs/evacuation.md
@@ -10,29 +10,16 @@ First of all, by the evacuation the data is transferred to other shards of the s
Only one running evacuation process is allowed on the node at a time.
-It is not necessary to turn maintenance mode on storage node.
-
-Once evacuation from shard started, it is impossible to read data from it via public API, except the case when evacuation stopped manually or node restarted.
-
-Because it is necessary to prevent removing by policer objects with policy `REP 1 ...` from remote node during evacuation.
-
`frostfs-cli` utility is used to manage evacuation.
## Commands
`frostfs-cli control shards evacuation start` starts evacuation process for shards specified. To start evacuating all node shards, use the `--all` flag.
-By default, objects and trees are evacuated. To limit the evacuation scope, use `--scope` flag (possible values are `all`, `trees`, `objects`).
-To evacuate objects only from containers with policy `REP 1` use option `--rep-one-only`.
-To adjust resource consumption required for evacuation use options:
- - `--container-worker-count` count of concurrent container evacuation workers
- - `--object-worker-count` count of concurrent object evacuation workers
`frostfs-cli control shards evacuation stop` stops running evacuation process.
`frostfs-cli control shards evacuation status` prints evacuation process status.
-`frostfs-cli control shards evacuation reset` resets evacuation process status.
-
See commands `--help` output for detailed description.
## Examples
@@ -52,15 +39,15 @@ Shard evacuation has been successfully started.
frostfs-cli control shards evacuation status --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
Enter password >
-Shard IDs: 8kEBwtvKLU3Hva3PaaodUi. Status: running. Evacuated 131 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:13:06Z UTC. Duration: 00:00:03. Estimated time left: 2 minutes.
+Shard IDs: 8kEBwtvKLU3Hva3PaaodUi. Status: running. Evacuated 14 object out of 61, failed to evacuate 0 objects. Started at: 2023-05-10T10:13:06Z UTC. Duration: 00:00:03. Estimated time left: 2 minutes.
frostfs-cli control shards evacuation status --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
Enter password >
-Shard IDs: 8kEBwtvKLU3Hva3PaaodUi. Status: running. Evacuated 260 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:13:06Z UTC. Duration: 00:01:05. Estimated time left: 1 minutes.
+Shard IDs: 8kEBwtvKLU3Hva3PaaodUi. Status: running. Evacuated 23 object out of 61, failed to evacuate 0 objects. Started at: 2023-05-10T10:13:06Z UTC. Duration: 00:01:05. Estimated time left: 1 minutes.
frostfs-cli control shards evacuation status --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
Enter password >
-Shard IDs: 8kEBwtvKLU3Hva3PaaodUi. Status: completed. Evacuated 618 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 19 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:13:06Z UTC. Duration: 00:02:13.
+Shard IDs: 8kEBwtvKLU3Hva3PaaodUi. Status: completed. Evacuated 61 object out of 61, failed to evacuate 0 objects. Started at: 2023-05-10T10:13:06Z UTC. Duration: 00:02:13.
```
### Stop running evacuation process
@@ -71,7 +58,7 @@ Shard evacuation has been successfully started.
frostfs-cli control shards evacuation status --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
Enter password >
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 131 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:15:47Z UTC. Duration: 00:00:03. Estimated time left: 0 minutes.
+Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 15 object out of 73, failed to evacuate 0 objects. Started at: 2023-05-10T10:15:47Z UTC. Duration: 00:00:03. Estimated time left: 0 minutes.
frostfs-cli control shards evacuation stop --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
Enter password >
@@ -79,7 +66,7 @@ Evacuation stopped.
frostfs-cli control shards evacuation status --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json
Enter password >
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: completed. Evacuated 131 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Error: context canceled. Started at: 2023-05-10T10:15:47Z UTC. Duration: 00:00:07.
+Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: completed. Evacuated 31 object out of 73, failed to evacuate 0 objects. Error: context canceled. Started at: 2023-05-10T10:15:47Z UTC. Duration: 00:00:07.
```
### Start evacuation and await it completes
@@ -88,11 +75,11 @@ frostfs-cli control shards evacuation start --endpoint s01.frostfs.devenv:8081 -
Enter password >
Shard evacuation has been successfully started.
Progress will be reported every 5 seconds.
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 131 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:04. Estimated time left: 0 minutes.
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 343 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:09. Estimated time left: 0 minutes.
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 545 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 0 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:14. Estimated time left: 0 minutes.
+Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 18 object out of 73, failed to evacuate 0 objects. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:04. Estimated time left: 0 minutes.
+Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 43 object out of 73, failed to evacuate 0 objects. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:09. Estimated time left: 0 minutes.
+Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Status: running. Evacuated 68 object out of 73, failed to evacuate 0 objects. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:14. Estimated time left: 0 minutes.
Shard evacuation has been completed.
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Evacuated 618 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 19 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:14.
+Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Evacuated 73 object out of 73, failed to evacuate 0 objects. Started at: 2023-05-10T10:18:42Z UTC. Duration: 00:00:14.
```
### Start evacuation and await it completes without progress notifications
@@ -101,15 +88,5 @@ frostfs-cli control shards evacuation start --endpoint s01.frostfs.devenv:8081 -
Enter password >
Shard evacuation has been successfully started.
Shard evacuation has been completed.
-Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Evacuated 618 objects out of 618, failed to evacuate: 0, skipped: 0; evacuated 19 trees out of 19, failed to evacuate: 0. Started at: 2023-05-10T10:20:00Z UTC. Duration: 00:00:14.
-```
-
-### Start trees evacuation and await it completes
-```bash
-frostfs-cli control shards evacuation start --id FxR6QujButNCHn7jjdhxGP --endpoint s01.frostfs.devenv:8081 --wallet ./../frostfs-dev-env/services/storage/wallet01.json --await --scope trees
-Enter password >
-Shard evacuation has been successfully started.
-Progress will be reported every 5 seconds.
-Shard evacuation has been completed.
-Shard IDs: FxR6QujButNCHn7jjdhxGP. Evacuated 0 objects out of 0, failed to evacuate: 0, skipped: 0; evacuated 2 trees out of 2, failed to evacuate: 0. Started at: 2024-02-08T08:44:17Z UTC. Duration: 00:00:00.
+Shard IDs: 54Y8aot9uc7BSadw2XtYr3. Evacuated 73 object out of 73, failed to evacuate 0 objects. Started at: 2023-05-10T10:20:00Z UTC. Duration: 00:00:14.
```
diff --git a/docs/images/authentication/authoverview.puml b/docs/images/authentication/authoverview.puml
deleted file mode 100644
index 20cac9f52..000000000
--- a/docs/images/authentication/authoverview.puml
+++ /dev/null
@@ -1,28 +0,0 @@
-@startuml authoverview
-!include
-!include
-AddElementTag("smart-contract", $bgColor=#0abab5)
-
-Person(user, "User", "User with private key")
-
-Container_Boundary(stor, "FrostFS Storage") {
- Component(verify, "Sign Service", $descr="Check request signature")
- Component(apesvc, "APE Service")
- Component(objsvc, "Object service")
-}
-
-Container_Boundary(neogo, "Blockchain") {
- Interface "NeoGo"
- Component(ffsid, "FrostFS ID", $tags="smart-contract", $descr="Stores namespaces and users")
- Component(policy, "Policy", $tags="smart-contract", $descr="Stores APE rules")
-}
-
-Rel_R(user, verify, "Requests", "gRPC")
-Rel_R(verify, apesvc, "Access control")
-Rel_R(apesvc, objsvc, "Operation")
-Rel_D(apesvc, NeoGo, "Get data to validate request")
-Rel("NeoGo", ffsid, "Fetch users")
-Rel("NeoGo", policy, "Fetch policies")
-
-SHOW_LEGEND(true)
-@enduml
diff --git a/docs/images/authentication/authoverview.svg b/docs/images/authentication/authoverview.svg
deleted file mode 100644
index a34a68da0..000000000
--- a/docs/images/authentication/authoverview.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/docs/images/authentication/impersonate.puml b/docs/images/authentication/impersonate.puml
deleted file mode 100644
index f0a5436f9..000000000
--- a/docs/images/authentication/impersonate.puml
+++ /dev/null
@@ -1,15 +0,0 @@
-@startuml impersonate
-start
-
-if (The request has bearer token with allow_impersonate=true?) then (yes)
- :Treat bearer token issuer as the request owner.;
- end
-(no) elseif (The request has session token?) then (yes)
- :Treat session token issuer as the request owner.;
- end
-else (no)
- :Determine request owner from the request signature.;
- end
-endif
-
-@enduml
diff --git a/docs/images/authentication/impersonate.svg b/docs/images/authentication/impersonate.svg
deleted file mode 100644
index add2c5439..000000000
--- a/docs/images/authentication/impersonate.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/docs/release-instruction.md b/docs/release-instruction.md
index aa867e83c..ec7b8cdf3 100644
--- a/docs/release-instruction.md
+++ b/docs/release-instruction.md
@@ -9,7 +9,7 @@ These should run successfully:
* `make lint` (should not change any files);
* `make fmts` (should not change any files);
* `go mod tidy` (should not change any files);
-* integration tests in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env).
+* integration tests in [frostfs-devenv](https://github.com/TrueCloudLab/frostfs-devenv).
## Make release commit
@@ -43,6 +43,11 @@ Write new revision number into the root `VERSION` file:
$ echo ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} > VERSION
```
+Update version in Debian package changelog file
+```
+$ cat debian/changelog
+```
+
Update the supported version of `TrueCloudLab/frostfs-contract` module in root
`README.md` if needed.
@@ -55,7 +60,7 @@ Add an entry to the `CHANGELOG.md` following the style established there.
* update `Unreleased...new` and `new...old` diff-links at the bottom of the file
* add optional codename and release date in the heading
* remove all empty sections such as `Added`, `Removed`, etc.
-* make sure all changes have references to relevant issues in `#123` format (if possible)
+* make sure all changes have references to GitHub issues in `#123` format (if possible)
* clean up all `Unreleased` sections and leave them empty
### Make release commit
@@ -95,31 +100,35 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
## Post-release
-### Prepare and push images to a Docker registry (automated)
+### Prepare and push images to a Docker Hub (if not automated)
-Create Docker images for all applications and push them into container registry
-(executed automatically in Forgejo Actions upon pushing a release tag):
+Create Docker images for all applications and push them into Docker Hub
+(requires [organization](https://hub.docker.com/u/truecloudlab) privileges)
```shell
$ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
$ make images
-$ make push-images
+$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION}
+$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION}
+$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION}
+$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION}
+$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION}
```
-### Make a proper release (if not automated)
+### Make a proper GitHub release (if not automated)
-Edit an automatically-created release on git.frostfs.info, copy things from `CHANGELOG.md`.
+Edit an automatically-created release on GitHub, copy things from `CHANGELOG.md`.
Build and tar release binaries with `make prepare-release`, attach them to
the release. Publish the release.
### Update FrostFS Developer Environment
-Prepare pull-request in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
+Prepare pull-request in [frostfs-devenv](https://github.com/TrueCloudLab/frostfs-devenv)
with new versions.
-### Close milestone
+### Close GitHub milestone
-Look up [milestones](https://git.frostfs.info/TrueCloudLab/frostfs-node/milestones) and close the release one if exists.
+Look up GitHub [milestones](https://github.com/TrueCloudLab/frostfs-node/milestones) and close the release one if exists.
### Rebuild FrostFS LOCODE database
diff --git a/docs/shard-modes.md b/docs/shard-modes.md
index 6cc4ab13c..7fca33d09 100644
--- a/docs/shard-modes.md
+++ b/docs/shard-modes.md
@@ -14,16 +14,7 @@ Each mode is characterized by two important properties:
| `read-only` | Read-only mode, only read operations are allowed, metabase is available. |
| `degraded` | Degraded mode in which metabase and write-cache is disabled. It shouldn't be used at all, because metabase can contain important indices, such as LOCK objects info and modifying operation in this mode can lead to unexpected behaviour. The purpose of this mode is to allow PUT/DELETE operations without the metabase if really necessary. |
| `degraded-read-only` | Same as `degraded`, but with only read operations allowed. This mode is used during SSD replacement and/or when the metabase error counter exceeds threshold. |
-| `disabled` | Currently used only in config file to temporarily disable a shard.
-
-## Shard and Component Status
-
-| Shard Mode | Metabase Mode | Blobstore Mode | Writecache Mode | Pilorama Mode | Blobovnicza Tree Mode | FSTree Mode |
-|-----------------------|---------------|----------------|-----------------|---------------|-----------------------|-------------|
-| `Read-Write` | READ_WRITE | READ_WRITE | READ_WRITE | READ_WRITE | READ_WRITE | READ_WRITE |
-| `Read-Only` | READ_ONLY | READ_ONLY | READ_ONLY | READ_ONLY | READ_ONLY | READ_ONLY |
-| `Degraded-Read-Write` | CLOSED | READ_WRITE | CLOSED | CLOSED | READ_WRITE | READ_WRITE |
-| `Degraded-Read-Only` | CLOSED | READ_ONLY | CLOSED | CLOSED | READ_ONLY | READ_ONLY |
+| `disabled` | Currently used only in config file to temporarily disable a shard. |
## Transition order
@@ -51,10 +42,7 @@ However, all mode changing operations are idempotent.
## Automatic mode changes
-A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
-
-# Detach shard
-
-To detach a shard use `frostfs-cli control shards detach` command. This command removes the shards from the storage
-engine and closes all resources associated with the shards.
-Limitation: `SIGHUP` or storage node restart lead to detached shard will be again online.
+Shard can automatically switch to a `degraded-read-only` mode in 3 cases:
+1. If the metabase was not available or couldn't be opened/initialized during shard startup.
+2. If shard error counter exceeds threshold.
+3. If the metabase couldn't be reopened during SIGHUP handling.
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index da9fdfed0..2e2d04088 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -14,7 +14,6 @@ There are some custom types used for brevity:
| Section | Description |
|--------------|---------------------------------------------------------|
-| `node` | [Node parameters](#node-section) |
| `logger` | [Logging parameters](#logger-section) |
| `pprof` | [PProf configuration](#pprof-section) |
| `prometheus` | [Prometheus metrics configuration](#prometheus-section) |
@@ -26,9 +25,7 @@ There are some custom types used for brevity:
| `replicator` | [Replicator service configuration](#replicator-section) |
| `storage` | [Storage engine configuration](#storage-section) |
| `runtime` | [Runtime configuration](#runtime-section) |
-| `audit` | [Audit configuration](#audit-section) |
-| `multinet` | [Multinet configuration](#multinet-section) |
-| `qos` | [QoS configuration](#qos-section) |
+
# `control` section
```yaml
@@ -112,21 +109,11 @@ Contains logger parameters.
```yaml
logger:
level: info
- tags:
- - names: "main, morph"
- level: debug
```
-| Parameter | Type | Default value | Description |
-|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------|
-| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
-| `tags` | list of [tags descriptions](#tags-subsection) | | Array of tags description. |
-
-## `tags` subsection
-| Parameter | Type | Default value | Description |
-|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `names` | `string` | | List of components divided by `,`.
Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. |
-| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. |
+| Parameter | Type | Default value | Description |
+|-----------|----------|---------------|---------------------------------------------------------------------------------------------------|
+| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` |
# `contracts` section
Contains override values for FrostFS side-chain contract hashes. Most of the time contract
@@ -152,26 +139,20 @@ contracts:
morph:
dial_timeout: 30s
cache_ttl: 15s
- ape_chain_cache_size: 10000
rpc_endpoint:
- address: wss://rpc1.morph.frostfs.info:40341/ws
priority: 1
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
switch_interval: 2m
- netmap:
- candidates:
- poll_interval: 20s
```
-| Parameter | Type | Default value | Description |
-|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
-| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. |
-| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
-| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
-| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
-| `netmap.candidates.poll_interval` | `duration` | `20s` | Timeout to set up frequency of merge candidates to netmap with netmap in local cache. |
+| Parameter | Type | Default value | Description |
+|-------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
+| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. |
+| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
+| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
## `rpc_endpoint` subsection
| Parameter | Type | Default value | Description |
@@ -185,6 +166,7 @@ Local storage engine configuration.
| Parameter | Type | Default value | Description |
|----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------|
+| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. |
| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
| `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. |
| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
@@ -195,41 +177,17 @@ Contains configuration for each shard. Keys must be consecutive numbers starting
`default` subsection has the same format and specifies defaults for missing values.
The following table describes configuration for each shard.
-| Parameter | Type | Default value | Description |
-| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- |
-| `compression` | [Compression config](#compression-subsection) | | Compression config. |
-| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
-| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
-| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
-| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
-| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
-| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
-| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
-| `gc` | [GC config](#gc-subsection) | | GC configuration. |
-| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. |
-
-### `compression` subsection
-
-Contains compression config.
-
-```yaml
-compression:
- enabled: true
- level: smallest_size
- exclude_content_types:
- - audio/*
- - video/*
- estimate_compressibility: true
- estimate_compressibility_threshold: 0.7
-```
-
-| Parameter | Type | Default value | Description |
-| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `enabled` | `bool` | `false` | Flag to enable compression. |
-| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. |
-| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
-| `estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. |
-| `estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. |
+| Parameter | Type | Default value | Description |
+|-------------------------------------|---------------------------------------------|---------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `compress` | `bool` | `false` | Flag to enable compression. |
+| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). |
+| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
+| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
+| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
+| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
+| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
+| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
+| `gc` | [GC config](#gc-subsection) | | GC configuration. |
### `blobstor` subsection
@@ -244,13 +202,11 @@ blobstor:
width: 4
- type: fstree
path: /path/to/blobstor/blobovnicza
- perm: 0o644
+ perm: 0644
size: 4194304
depth: 1
width: 4
opened_cache_capacity: 50
- opened_cache_ttl: 5m
- opened_cache_exp_interval: 15s
```
#### Common options for sub-storages
@@ -267,18 +223,14 @@ blobstor:
| `depth` | `int` | `4` | File-system tree depth. |
#### `blobovnicza` type options
-| Parameter | Type | Default value | Description |
-|-----------------------------| ---------- |---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `path` | `string` | | Path to the root of the blobstor. |
-| `perm` | file mode | `0660` | Default permission for created files and directories. |
-| `size` | `size` | `1 G` | Maximum size of a single blobovnicza |
-| `depth` | `int` | `2` | Blobovnicza tree depth. |
-| `width` | `int` | `16` | Blobovnicza tree width. |
-| `opened_cache_capacity` | `int` | `16` | Maximum number of simultaneously opened blobovniczas. |
-| `opened_cache_ttl` | `duration` | `0` | TTL in cache for opened blobovniczas(disabled by default). In case of heavy random-read and 10 shards each with 10_000 databases and accessing 400 objects per-second we will access each db approximately once per ((10 * 10_000 / 400) = 250 seconds <= 300 seconds = 5 min). Also take in mind that in this scenario they will probably be closed earlier because of the cache capacity, so bigger values are likely to be of no use. |
-| `opened_cache_exp_interval` | `duration` | `15s` | Cache cleanup interval for expired blobovnicza's. |
-| `init_worker_count` | `int` | `5` | Maximum number of concurrent initialization workers. |
-| `rebuild_drop_timeout` | `duration` | `10s` | Timeout before drop empty blobovnicza file during rebuild. |
+| Parameter | Type | Default value | Description |
+|-------------------------|-----------|---------------|-------------------------------------------------------|
+| `path` | `string` | | Path to the root of the blobstor. |
+| `perm` | file mode | `0660` | Default permission for created files and directories. |
+| `size` | `size` | `1 G` | Maximum size of a single blobovnicza |
+| `depth` | `int` | `2` | Blobovnicza tree depth. |
+| `width` | `int` | `16` | Blobovnicza tree width. |
+| `opened_cache_capacity` | `int` | `16` | Maximum number of simultaneously opened blobovniczas. |
### `gc` subsection
@@ -289,7 +241,7 @@ gc:
remover_batch_size: 200
remover_sleep_interval: 5m
expired_collector_batch_size: 500
- expired_collector_worker_count: 5
+ expired_collector_workers_count: 5
```
| Parameter | Type | Default value | Description |
@@ -297,14 +249,14 @@ gc:
| `remover_batch_size` | `int` | `100` | Amount of objects to grab in a single batch. |
| `remover_sleep_interval` | `duration` | `1m` | Time to sleep between iterations. |
| `expired_collector_batch_size` | `int` | `500` | Max amount of expired objects to grab in a single batch. |
-| `expired_collector_worker_count` | `int` | `5` | Max amount of concurrent expired objects workers. |
+| `expired_collector_workers_count` | `int` | `5` | Max amount of concurrent expired objects workers. |
### `metabase` subsection
```yaml
metabase:
path: /path/to/meta.db
- perm: 0o644
+ perm: 0644
max_batch_size: 200
max_batch_delay: 20ms
```
@@ -321,80 +273,25 @@ metabase:
```yaml
writecache:
enabled: true
+ type: bbolt
path: /path/to/writecache
capacity: 4294967296
+ small_object_size: 16384
max_object_size: 134217728
- flush_worker_count: 30
+ workers_number: 30
```
-| Parameter | Type | Default value | Description |
-| --------------------------- | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------- |
-| `path` | `string` | | Path to the metabase file. |
-| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `max_object_count` | `int` | unrestricted | Approximate maximum objects count in the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
-| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
-| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
+| Parameter | Type | Default value | Description |
+|----------------------|------------|---------------|----------------------------------------------------------------------------------------------------------------------|
+| `type` | `string` | | Type of write cache backing implementation to use (`bbolt`, `badger`). |
+| `path` | `string` | | Path to the metabase file. |
+| `capacity` | `size` | unrestricted | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
+| `small_object_size`  | `size`     | `32K`         | Maximum object size for "small" objects. These objects are stored in a key-value database instead of a file-system.    |
+| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
+| `workers_number` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
+| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
+| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
-### `limits` subsection
-
-```yaml
-limits:
- max_read_running_ops: 10000
- max_read_waiting_ops: 1000
- max_write_running_ops: 1000
- max_write_waiting_ops: 100
- read:
- - tag: internal
- weight: 20
- limit_ops: 0
- reserved_ops: 1000
- - tag: client
- weight: 70
- reserved_ops: 10000
- - tag: background
- weight: 5
- limit_ops: 10000
- reserved_ops: 0
- - tag: writecache
- weight: 5
- limit_ops: 25000
- - tag: policer
- weight: 5
- limit_ops: 25000
- write:
- - tag: internal
- weight: 200
- limit_ops: 0
- reserved_ops: 100
- - tag: client
- weight: 700
- reserved_ops: 1000
- - tag: background
- weight: 50
- limit_ops: 1000
- reserved_ops: 0
- - tag: writecache
- weight: 50
- limit_ops: 2500
- - tag: policer
- weight: 50
- limit_ops: 2500
-```
-
-| Parameter | Type | Default value | Description |
-| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------------------------------- |
-| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of runnig read operations. |
-| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. |
-| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
-| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
-| `read` | `[]tag` | empty | Array of shard read settings for tags. |
-| `write` | `[]tag` | empty | Array of shard write settings for tags. |
-| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
-| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. |
-| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
-| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
-| `tag.prohibited` | `bool` | false | If true, operations with this specified tag will be prohibited. |
# `node` section
@@ -410,22 +307,32 @@ node:
- "Price:11"
- "UN-LOCODE:RU MSK"
- "key:value"
+ relay: false
persistent_sessions:
path: /sessions
persistent_state:
path: /state
- locode_db_path: "/path/to/locode/db"
+ notification:
+ enabled: true
+ endpoint: tls://localhost:4222
+ timeout: 6s
+ default_topic: topic
+ certificate: /path/to/cert.pem
+ key: /path/to/key.pem
+ ca: /path/to/ca.pem
```
-| Parameter | Type | Default value | Description |
-|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------|
-| `key` | `string` | | Path to the binary-encoded private key. |
-| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
-| `addresses` | `[]string` | | Addresses advertised in the netmap. |
-| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. |
-| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. |
-| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. |
-| `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. |
+| Parameter | Type | Default value | Description |
+|-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------|
+| `key` | `string` | | Path to the binary-encoded private key. |
+| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. |
+| `addresses` | `[]string` | | Addresses advertised in the netmap. |
+| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. |
+| `relay` | `bool` | | Enable relay mode. |
+| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. |
+| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. |
+| `notification` | [Notification config](#notification-subsection) | | NATS configuration. |
+
## `wallet` subsection
N3 wallet configuration.
@@ -452,6 +359,19 @@ It is used to correctly handle node restarts or crashes.
|-----------|----------|------------------------|------------------------|
| `path` | `string` | `.frostfs-storage-state` | Path to the database. |
+## `notification` subsection
+This is an advanced section, use with caution.
+
+| Parameter | Type | Default value | Description |
+|-----------------|------------|-------------------|-------------------------------------------------------------------|
+| `enabled` | `bool` | `false` | Flag to enable the service. |
+| `endpoint` | `string` | | NATS endpoint to connect to. |
+| `timeout` | `duration` | `5s` | Timeout for the object notification operation. |
+| `default_topic` | `string` | node's public key | Default topic to use if an object has no corresponding attribute. |
+| `certificate` | `string` | | Path to the client certificate. |
+| `key` | `string` | | Path to the client key. |
+| `ca` | `string` | | Override root CA used to verify server certificates. |
+
# `apiclient` section
Configuration for the FrostFS API client used for communication with other FrostFS nodes.
@@ -490,46 +410,25 @@ replicator:
pool_size: 10
```
-| Parameter | Type | Default value | Description |
-|---------------|------------|---------------|---------------------------------------------|
-| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
-| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. |
+| Parameter | Type | Default value | Description |
+|---------------|------------|----------------------------------------|---------------------------------------------|
+| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
+| `pool_size` | `int` | Equal to `object.put.pool_size_remote` | Maximum amount of concurrent replications. |
# `object` section
Contains object-service related parameters.
```yaml
object:
- get:
- priority:
- - $attribute:ClusterName
+ put:
+ pool_size_remote: 100
```
-| Parameter | Type | Default value | Description |
-|-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------|
-| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
-| `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. |
-
-
-# `rpc` section
-Contains limits on the number of active RPC for specified method(s).
-
-```yaml
-rpc:
- limits:
- - methods:
- - /neo.fs.v2.object.ObjectService/PutSingle
- - /neo.fs.v2.object.ObjectService/Put
- max_ops: 1000
- - methods:
- - /neo.fs.v2.object.ObjectService/Get
- max_ops: 10000
-```
-
-| Parameter | Type | Default value | Description |
-|------------------|------------|---------------|--------------------------------------------------------------|
-| `limits.max_ops` | `int` | | Maximum number of active RPC allowed for the given method(s) |
-| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit |
+| Parameter | Type | Default value | Description |
+|-----------------------------|-------|---------------|------------------------------------------------------------------------------------------------|
+| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
+| `put.pool_size_remote` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
+| `put.pool_size_local` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
# `runtime` section
Contains runtime parameters.
@@ -542,61 +441,3 @@ runtime:
| Parameter | Type | Default value | Description |
|---------------------|--------|---------------|--------------------------------------------------------------------------|
| `soft_memory_limit` | `size` | 0 | Soft memory limit for the runtime. Zero or no value stands for no limit. If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. |
-
-# `audit` section
-Contains audit parameters.
-
-```yaml
-audit:
- enabled: true
-```
-
-| Parameter | Type | Default value | Description |
-|-----------|--------|---------------|---------------------------------------------------|
-| `enabled` | `bool` | false | If `true` then audit event logs will be recorded. |
-
-
-# `multinet` section
-Contains multinet parameters.
-
-```yaml
-multinet:
- enabled: true
- subnets:
- - mask: 192.168.219.174/24
- source_ips:
- - 192.168.218.185
- - 192.168.219.185
- - mask: 10.78.70.74/24
- source_ips:
- - 10.78.70.185
- - 10.78.71.185
- balancer: roundrobin
- restrict: false
- fallback_delay: 350ms
-```
-
-| Parameter | Type | Default value | Description |
-| ---------------- | ---------- | ------------- | -------------------------------------------------------------------------------------------------------------------------- |
-| `enabled` | `bool` | false | If `true` then source-based routing is enabled. |
-| `subnets` | `subnet` | empty | Resulting subnets. |
-| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". |
-| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. |
-| `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. |
-
-# `qos` section
-```yaml
-qos:
- critical:
- authorized_keys:
- - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
- internal:
- authorized_keys:
- - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
-```
-| Parameter | Type | Default value | Description |
-| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- |
-| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. |
-| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. |
diff --git a/docs/update-go-instruction.md b/docs/update-go-instruction.md
index 195e0c6b3..f99225046 100644
--- a/docs/update-go-instruction.md
+++ b/docs/update-go-instruction.md
@@ -7,7 +7,7 @@
## Update CI
Change Golang versions for unit test in CI.
-There is `go` section in `.forgejo/workflows/*.yml` files:
+There is `go` section in `.github/workflows/go.yaml` file:
```yaml
jobs:
test:
diff --git a/go.mod b/go.mod
index fb45c3874..28008128c 100644
--- a/go.mod
+++ b/go.mod
@@ -1,97 +1,90 @@
module git.frostfs.info/TrueCloudLab/frostfs-node
-go 1.23.0
+go 1.20
require (
- code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
- git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
- git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2
- git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
- git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.0
+ git.frostfs.info/TrueCloudLab/frostfs-contract v0.18.0
+ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230928142024-84b9d29fc98c
git.frostfs.info/TrueCloudLab/hrw v1.2.1
- git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
- git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
- git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
- github.com/VictoriaMetrics/easyproto v0.1.4
github.com/cheggaaa/pb v1.0.29
github.com/chzyer/readline v1.5.1
- github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
- github.com/felixge/fgprof v0.9.5
+ github.com/dgraph-io/ristretto v0.1.1
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
- github.com/gdamore/tcell/v2 v2.7.4
- github.com/go-pkgz/expirable-cache/v3 v3.0.0
- github.com/google/uuid v1.6.0
- github.com/hashicorp/golang-lru/v2 v2.0.7
- github.com/klauspost/compress v1.17.4
- github.com/mailru/easyjson v0.7.7
+ github.com/google/uuid v1.3.0
+ github.com/hashicorp/golang-lru/v2 v2.0.4
+ github.com/klauspost/compress v1.16.6
+ github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.2.0
- github.com/multiformats/go-multiaddr v0.15.0
- github.com/nspcc-dev/neo-go v0.106.3
+ github.com/multiformats/go-multiaddr v0.9.0
+ github.com/nats-io/nats.go v1.27.1
+ github.com/nspcc-dev/neo-go v0.101.5-0.20230808195420-5fc61be5f6c5
github.com/olekukonko/tablewriter v0.0.5
- github.com/panjf2000/ants/v2 v2.9.0
- github.com/prometheus/client_golang v1.19.0
- github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130
- github.com/spf13/cast v1.6.0
- github.com/spf13/cobra v1.8.1
+ github.com/panjf2000/ants/v2 v2.7.5
+ github.com/paulmach/orb v0.9.2
+ github.com/prometheus/client_golang v1.16.0
+ github.com/spf13/cast v1.5.1
+ github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
- github.com/spf13/viper v1.19.0
- github.com/ssgreg/journald v1.0.0
- github.com/stretchr/testify v1.9.0
- go.etcd.io/bbolt v1.3.10
- go.opentelemetry.io/otel v1.31.0
- go.opentelemetry.io/otel/trace v1.31.0
- go.uber.org/zap v1.27.0
- golang.org/x/sync v0.12.0
- golang.org/x/sys v0.31.0
- golang.org/x/term v0.30.0
- google.golang.org/grpc v1.69.2
- google.golang.org/protobuf v1.36.1
+ github.com/spf13/viper v1.16.0
+ github.com/stretchr/testify v1.8.4
+ go.etcd.io/bbolt v1.3.7
+ go.opentelemetry.io/otel v1.16.0
+ go.opentelemetry.io/otel/trace v1.16.0
+ go.uber.org/zap v1.24.0
+ golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df
+ golang.org/x/sync v0.3.0
+ golang.org/x/term v0.9.0
+ google.golang.org/grpc v1.56.1
+ google.golang.org/protobuf v1.31.0
gopkg.in/yaml.v3 v3.0.1
)
require (
- github.com/sagikazarmark/locafero v0.6.0 // indirect
- github.com/sagikazarmark/slog-shim v0.1.0 // indirect
- github.com/sourcegraph/conc v0.3.0 // indirect
+ github.com/dustin/go-humanize v1.0.0 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/glog v1.1.0 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/google/flatbuffers v1.12.1 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ go.opencensus.io v0.24.0 // indirect
)
require (
+ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
- github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+ github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/bits-and-blooms/bitset v1.13.0 // indirect
- github.com/cenkalti/backoff/v4 v4.3.0 // indirect
- github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/bits-and-blooms/bitset v1.8.0 // indirect
+ github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
- github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
- github.com/davidmz/go-pageant v1.0.2 // indirect
+ github.com/consensys/gnark-crypto v0.11.0 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
- github.com/gdamore/encoding v1.0.0 // indirect
- github.com/go-fed/httpsig v1.1.0 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/dgraph-io/badger/v4 v4.1.0
+ github.com/fsnotify/fsnotify v1.6.0 // indirect
+ github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
- github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
- github.com/gorilla/websocket v1.5.1 // indirect
- github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect
- github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
- github.com/hashicorp/go-version v1.6.0 // indirect
+ github.com/gorilla/websocket v1.5.0 // indirect
+ github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0-rc.0 // indirect
+ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.5 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+ github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/holiman/uint256 v1.2.4 // indirect
+ github.com/holiman/uint256 v1.2.2 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/ipfs/go-cid v0.5.0 // indirect
- github.com/josharian/intern v1.0.0 // indirect
- github.com/klauspost/cpuid/v2 v2.2.10 // indirect
- github.com/klauspost/reedsolomon v1.12.1 // indirect
- github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+ github.com/ipfs/go-cid v0.4.1 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/magiconair/properties v1.8.7 // indirect
- github.com/mattn/go-runewidth v0.0.15 // indirect
+ github.com/mattn/go-runewidth v0.0.14 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
@@ -100,38 +93,44 @@ require (
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
- github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
- github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec // indirect
- github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
- github.com/pelletier/go-toml/v2 v2.2.2 // indirect
- github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_model v0.5.0 // indirect
- github.com/prometheus/common v0.48.0 // indirect
- github.com/prometheus/procfs v0.12.0 // indirect
- github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/nats-io/nats-server/v2 v2.7.4 // indirect
+ github.com/nats-io/nkeys v0.4.4 // indirect
+ github.com/nats-io/nuid v1.0.1 // indirect
+ github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
+ github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230808195420-5fc61be5f6c5 // indirect
+ github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
+ github.com/pelletier/go-toml/v2 v2.0.8 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_model v0.4.0 // indirect
+ github.com/prometheus/common v0.44.0 // indirect
+ github.com/prometheus/procfs v0.11.0 // indirect
+ github.com/rivo/uniseg v0.4.4 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
- github.com/spf13/afero v1.11.0 // indirect
- github.com/subosito/gotenv v1.6.0 // indirect
- github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
+ github.com/spf13/afero v1.9.5 // indirect
+ github.com/spf13/jwalterweatherman v1.1.0 // indirect
+ github.com/subosito/gotenv v1.4.2 // indirect
+ github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
github.com/urfave/cli v1.22.14 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
- go.opentelemetry.io/otel/metric v1.31.0 // indirect
- go.opentelemetry.io/otel/sdk v1.31.0 // indirect
- go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+ go.mongodb.org/mongo-driver v1.12.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 // indirect
+ go.opentelemetry.io/otel/metric v1.16.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.16.0 // indirect
+ go.opentelemetry.io/proto/otlp v0.20.0 // indirect
+ go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.36.0 // indirect
- golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
- golang.org/x/net v0.30.0 // indirect
- golang.org/x/text v0.23.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
+ golang.org/x/crypto v0.10.0 // indirect
+ golang.org/x/net v0.11.0 // indirect
+ golang.org/x/sys v0.9.0 // indirect
+ golang.org/x/text v0.10.0 // indirect
+ google.golang.org/genproto v0.0.0-20230628200519-e449d1ea0e82 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20230628200519-e449d1ea0e82 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230628200519-e449d1ea0e82 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
- lukechampine.com/blake3 v1.4.0 // indirect
+ lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
-
-replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07
diff --git a/go.sum b/go.sum
index acc26af36..8490c167c 100644
--- a/go.sum
+++ b/go.sum
@@ -1,49 +1,465 @@
-code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
-code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
+cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
+cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
+cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o=
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
+cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
+cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
+cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
+cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
+cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
+cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk=
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
+cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc=
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
+cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
+cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
+cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
+cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
+cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
+cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
+cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
+cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
+cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
+cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
+cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
+cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
+cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
+cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
+cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
+cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
+cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
+cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
+cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
+cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
+cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
+cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
+cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
+cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=
+cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
+cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
+cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
+cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
+cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
+cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
+cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
+cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
+cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
+cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
+cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
+cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
+cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
+cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
+cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
+cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
+cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
+cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
+cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=
+cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
+cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
+cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
+cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
+cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
+cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
+cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
+cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
+cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
+cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
+cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
+cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
+cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
+cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
+cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
+cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
+cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
+cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.0 h1:vy6leTEGcKVrLmKfeK5pGGIi3D1vn6rX32Hy4gIOWto=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.0/go.mod h1:uY0AYmCznjZdghDnAk7THFIe1Vlg531IxUcus7ZfUJI=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.18.0 h1:9ahw69njrwf2legz5xNVTA+4A6UwcBzy9oBdbJmoBxU=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.18.0/go.mod h1:3V8FyzpbIIxzpgfUaSlOJBAT11IzhZzkQnGpYvRQR5E=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
-git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY=
-git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU=
-git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230928142024-84b9d29fc98c h1:c8mduKlc8Zioppz5o06QRYS5KYX3BFRO+NgKj2q6kD8=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230928142024-84b9d29fc98c/go.mod h1:t1akKcUH7iBrFHX8rSXScYMP17k2kYQXMbZooiL5Juw=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
-git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
-git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
-git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY=
-git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 h1:V0a7ia84ZpSM2YxpJq1SKLQfeYmsqFWqcxwweBHJIzc=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8=
-git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4=
-git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
-github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
-github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
-github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/CityOfZion/neo-go v0.62.1-pre.0.20191114145240-e740fbe708f8/go.mod h1:MJCkWUBhi9pn/CrYO1Q3P687y2KeahrOPS9BD9LDGb0=
+github.com/CityOfZion/neo-go v0.70.1-pre.0.20191209120015-fccb0085941e/go.mod h1:0enZl0az8xA6PVkwzEOwPWVJGqlt/GO4hA4kmQ5Xzig=
+github.com/CityOfZion/neo-go v0.70.1-pre.0.20191212173117-32ac01130d4c/go.mod h1:JtlHfeqLywZLswKIKFnAp+yzezY4Dji9qlfQKB2OD/I=
+github.com/CityOfZion/neo-go v0.71.1-pre.0.20200129171427-f773ec69fb84/go.mod h1:FLI526IrRWHmcsO+mHsCbj64pJZhwQFTLJZu+A4PGOA=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/Workiva/go-datastructures v1.0.50/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA=
+github.com/abiosoft/ishell v2.0.0+incompatible/go.mod h1:HQR9AqF2R3P4XXpMpI0NAzgHf/aS6+zVXRj14cVk9qg=
+github.com/abiosoft/ishell/v2 v2.0.2/go.mod h1:E4oTCXfo6QjoCart0QYa5m9w4S+deXs/P/9jA77A9Bs=
+github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db/go.mod h1:rB3B4rKii8V21ydCbIzH5hZiCQE7f5E9SzUb/ZZx530=
+github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
+github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210521073959-f0d4d129b7f1/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
-github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
-github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
-github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c=
+github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
+github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
+github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
+github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
+github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
+github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
+github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
+github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
+github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
+github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
+github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
+github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30=
-github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
-github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
-github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
@@ -53,391 +469,1202 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 h1:tYj5Ydh5D7Xg2R1tJnoG36Yta7NVB8C0vx36oPA3Bbw=
-github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
+github.com/consensys/gnark-crypto v0.9.1/go.mod h1:a2DQL4+5ywF6safEeZFEPGRiiGbjzGFRUN2sg06VuU4=
+github.com/consensys/gnark-crypto v0.11.0 h1:QqzHQlwEqlQr5jfWblGDkwlKHpT+4QodYqqExkAtyks=
+github.com/consensys/gnark-crypto v0.11.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0=
-github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE=
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
+github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
+github.com/dgraph-io/badger/v4 v4.1.0 h1:E38jc0f+RATYrycSUf9LMv/t47XAy+3CApyYSq4APOQ=
+github.com/dgraph-io/badger/v4 v4.1.0/go.mod h1:P50u28d39ibBRmIJuQC/NSdBOg46HnHw7al2SW5QRHg=
+github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
+github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
-github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
-github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
-github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
+github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI=
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M=
-github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
-github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
+github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
+github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
-github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
-github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU=
-github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg=
-github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
-github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-pkgz/expirable-cache/v3 v3.0.0 h1:u3/gcu3sabLYiTCevoRKv+WzjIn5oo7P8XtiXBeRDLw=
-github.com/go-pkgz/expirable-cache/v3 v3.0.0/go.mod h1:2OQiDyEGQalYecLWmXprm3maPXeVb5/6/X7yRPYTzec=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
-github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
+github.com/go-redis/redis v6.10.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw=
+github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
-github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
-github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
-github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
-github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
-github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 h1:f4tggROQKKcnh4eItay6z/HbHLqghBxS8g7pyMhmDio=
-github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 h1:HcUWd006luQPljE73d5sk+/VgYPGUReEVz2y1/qylwY=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1/go.mod h1:w9Y7gY31krpLmrVU5ZPG9H7l9fZuRu5/3R3S3FMtVQ4=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
-github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0-rc.0 h1:mdLirNAJBxnGgyB6pjZLcs6ue/6eZGBui6gXspfq4ks=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0-rc.0/go.mod h1:kdXbOySqcQeTxiqglW7aahTmWZy3Pgi6SYL36yvKeyA=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.5 h1:3IZOAnD058zZllQTZNBioTlrzrBG/IjpiZ133IEtusM=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.5/go.mod h1:xbKERva94Pw2cPen0s79J3uXmGzbbpDYFBFDlZ4mV/w=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
+github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.4 h1:7GHuZcgid37q8o5i3QI9KMT4nCWQQ3Kx3Ov6bb9MfK0=
+github.com/hashicorp/golang-lru/v2 v2.0.4/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
-github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
+github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
+github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
+github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
-github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
-github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
-github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
-github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
-github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q=
-github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs=
+github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
+github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
+github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
+github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
-github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
-github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
-github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
-github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
-github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo=
-github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
+github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ=
+github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
-github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk=
-github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec h1:vDrbVXF2+2uP0RlkZmem3QYATcXCu9BzzGGCNsNcK7Q=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
-github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM=
-github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 h1:vU9tpM3apjYlLLeY23zRWJ9Zktr5jp+mloR942LEOpY=
+github.com/nats-io/nats-server/v2 v2.7.4 h1:c+BZJ3rGzUKCBIM4IXO8uNT2u1vajGbD1kPA6wqCEaM=
+github.com/nats-io/nats-server/v2 v2.7.4/go.mod h1:1vZ2Nijh8tcyNe8BDVyTviCd9NYzRbubQYiEHsvOQWc=
+github.com/nats-io/nats.go v1.27.1 h1:OuYnal9aKVSnOzLQIzf7554OXMCG7KbaTkCSBHRcSoo=
+github.com/nats-io/nats.go v1.27.1/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc=
+github.com/nats-io/nkeys v0.4.4 h1:xvBJ8d69TznjcQl9t6//Q5xXuVhyYiSos6RPtvQNTwA=
+github.com/nats-io/nkeys v0.4.4/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
+github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/nspcc-dev/dbft v0.0.0-20191205084618-dacb1a30c254/go.mod h1:w1Ln2aT+dBlPhLnuZhBV+DfPEdS2CHWWLp5JTScY3bw=
+github.com/nspcc-dev/dbft v0.0.0-20191209120240-0d6b7568d9ae/go.mod h1:3FjXOoHmA51EGfb5GS/HOv7VdmngNRTssSeQ729dvGY=
+github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a/go.mod h1:/YFK+XOxxg0Bfm6P92lY5eDSLYfp06XOdL8KAVgXjVk=
+github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1/go.mod h1:O0qtn62prQSqizzoagHmuuKoz8QMkU3SzBoKdEvm3aQ=
+github.com/nspcc-dev/dbft v0.0.0-20210721160347-1b03241391ac/go.mod h1:U8MSnEShH+o5hexfWJdze6uMFJteP0ko7J2frO7Yu1Y=
+github.com/nspcc-dev/dbft v0.0.0-20220629112714-fd49ca59d354/go.mod h1:U8MSnEShH+o5hexfWJdze6uMFJteP0ko7J2frO7Yu1Y=
+github.com/nspcc-dev/dbft v0.0.0-20221020093431-31c1bbdc74f2/go.mod h1:g9xisXmX9NP9MjioaTe862n9SlZTrP+6PVUWLBYOr98=
+github.com/nspcc-dev/dbft v0.0.0-20230515113611-25db6ba61d5c/go.mod h1:kjBC9F8L25GR+kIHy/1KgG/KfcoGnVwIiyovgq1uszk=
+github.com/nspcc-dev/go-ordered-json v0.0.0-20210915112629-e1b6cce73d02/go.mod h1:79bEUDEviBHJMFV6Iq6in57FEOCMcRhfQnfaf0ETA5U=
+github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 h1:n4ZaFCKt1pQJd7PXoMJabZWK9ejjbLOVrkl/lOUmshg=
+github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22/go.mod h1:79bEUDEviBHJMFV6Iq6in57FEOCMcRhfQnfaf0ETA5U=
+github.com/nspcc-dev/hrw v1.0.9/go.mod h1:l/W2vx83vMQo6aStyx2AuZrJ+07lGv2JQGlVkPG06MU=
+github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg=
+github.com/nspcc-dev/neo-go v0.98.0/go.mod h1:E3cc1x6RXSXrJb2nDWXTXjnXk3rIqVN8YdFyWv+FrqM=
+github.com/nspcc-dev/neo-go v0.99.2/go.mod h1:9P0yWqhZX7i/ChJ+zjtiStO1uPTolPFUM+L5oNznU8E=
+github.com/nspcc-dev/neo-go v0.100.1/go.mod h1:Nnp7F4e9IBccsgtCeLtUWV+0T6gk1PtP5HRtA13hUfc=
+github.com/nspcc-dev/neo-go v0.101.5-0.20230808195420-5fc61be5f6c5 h1:AXI2upTPeTqX+n4xrBEzPATgEviOM/Prg6UQ6KDm+DU=
+github.com/nspcc-dev/neo-go v0.101.5-0.20230808195420-5fc61be5f6c5/go.mod h1:Z0kpjwnTJj/ik/X6z18xjCL0X2+RNbqlnhKrl+MYgP8=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20220809123759-3094d3e0c14b/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230420112658-c50ab951645a/go.mod h1:ZUuXOkdtHZgaC13za/zMgXfQFncZ0jLzfQTe+OsDOtg=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230808195420-5fc61be5f6c5 h1:/d7mY5hYlNhmEXexKcyqSR0b1Hdl5hf/c5o8Vi/1vt4=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230808195420-5fc61be5f6c5/go.mod h1:ZUuXOkdtHZgaC13za/zMgXfQFncZ0jLzfQTe+OsDOtg=
+github.com/nspcc-dev/neofs-api-go/v2 v2.11.0-pre.0.20211201134523-3604d96f3fe1/go.mod h1:oS8dycEh8PPf2Jjp6+8dlwWyEv2Dy77h/XhhcdxYEFs=
+github.com/nspcc-dev/neofs-api-go/v2 v2.11.1/go.mod h1:oS8dycEh8PPf2Jjp6+8dlwWyEv2Dy77h/XhhcdxYEFs=
+github.com/nspcc-dev/neofs-api-go/v2 v2.14.0/go.mod h1:DRIr0Ic1s+6QgdqmNFNLIqMqd7lNMJfYwkczlm1hDtM=
+github.com/nspcc-dev/neofs-contract v0.16.0/go.mod h1:gN5bo2TlMvLbySImmg76DVj3jVmYgti2VVlQ+h/tcr0=
+github.com/nspcc-dev/neofs-crypto v0.2.0/go.mod h1:F/96fUzPM3wR+UGsPi3faVNmFlA9KAEAUQR7dMxZmNA=
+github.com/nspcc-dev/neofs-crypto v0.2.3/go.mod h1:8w16GEJbH6791ktVqHN9YRNH3s9BEEKYxGhlFnp0cDw=
+github.com/nspcc-dev/neofs-crypto v0.3.0/go.mod h1:8w16GEJbH6791ktVqHN9YRNH3s9BEEKYxGhlFnp0cDw=
+github.com/nspcc-dev/neofs-crypto v0.4.0/go.mod h1:6XJ8kbXgOfevbI2WMruOtI+qUJXNwSGM/E9eClXxPHs=
+github.com/nspcc-dev/neofs-sdk-go v0.0.0-20211201182451-a5b61c4f6477/go.mod h1:dfMtQWmBHYpl9Dez23TGtIUKiFvCIxUZq/CkSIhEpz4=
+github.com/nspcc-dev/neofs-sdk-go v0.0.0-20220113123743-7f3162110659/go.mod h1:/jay1lr3w7NQd/VDBkEhkJmDmyPNsu4W+QV2obsUV40=
+github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.9/go.mod h1:fTsdTU/M9rvv/f9jlp7vHOm3DRp+NSfjfTv9NohrKTE=
+github.com/nspcc-dev/rfc6979 v0.1.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso=
+github.com/nspcc-dev/rfc6979 v0.2.0 h1:3e1WNxrN60/6N0DW7+UYisLeZJyfqZTNOjeV/toYvOE=
+github.com/nspcc-dev/rfc6979 v0.2.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso=
+github.com/nspcc-dev/tzhash v1.7.0/go.mod h1:Dnx9LUlOLr5paL2Rtc96x0PPs8D9eIkUtowt1n+KQus=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
-github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
-github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo=
-github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
-github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
-github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/panjf2000/ants/v2 v2.7.5 h1:/vhh0Hza9G1vP1PdCj9hl6MUzCRbmtcTJL0OsnmytuU=
+github.com/panjf2000/ants/v2 v2.7.5/go.mod h1:KIBmYG9QQX5U2qzFP/yQJaq/nSb6rahS9iEHkrCMgM8=
+github.com/paulmach/orb v0.9.2 h1:p/YWV2uJwamAynnDOJGNbPBVtDHj3vG51k9tR1rFwJE=
+github.com/paulmach/orb v0.9.2/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
+github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
+github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
+github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
+github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
-github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
-github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130 h1:o1CYtoFOm6xJK3DvDAEG5wDJPLj+SoxUtUDFaQgt1iY=
-github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130/go.mod h1:02iFIz7K/A9jGCvrizLPvoqr4cEIx7q54RH5Qudkrss=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
+github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk=
+github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
-github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
+github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
-github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
-github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
-github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
-github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
-github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
-github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
-github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
-github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
+github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
+github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
-github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
-github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU=
-github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0=
+github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
+github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
-github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
-github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
-github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/syndtr/goleveldb v0.0.0-20180307113352-169b1b37be73/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
+github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs=
+github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/twmb/murmur3 v1.1.5/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=
github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
+github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74 h1:JwtAtbp7r/7QSyGz8mKUbYJBg2+6Cd7OjM8o/GNOcVo=
+github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74/go.mod h1:RmMWU37GKR2s6pgrIEB4ixgpVCt/cf7dnJv3fuH1J1c=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
+github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
+github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
-go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
-go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
-go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
-go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
-go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
-go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
-go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
-go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
-go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
-go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
-go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
-go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
-go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
-go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
-go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
+github.com/yuin/gopher-lua v0.0.0-20191128022950-c6266f4fe8d7/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
+go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M=
+go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
+go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE=
+go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
+go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 h1:+XWJd3jf75RXJq29mxbuXhCXFDG3S3R4vBUeSI2P7tE=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0/go.mod h1:hqgzBPTf4yONMFgdZvL/bK42R/iinTyVQtiWihs3SZc=
+go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
+go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
+go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE=
+go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4=
+go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
+go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.20.0 h1:BLOA1cZBAGSbRiNuGCCKiFrCdYB7deeHDeD1SueyOfA=
+go.opentelemetry.io/proto/otlp v0.20.0/go.mod h1:3QgjzPALBIv9pcknj2EXGPXjYPFdUh/RQfF8Lz3+Vnw=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
-go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
-golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
-golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
+golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
+golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
+golang.org/x/exp v0.0.0-20221227203929-1b447090c38c/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME=
+golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
-golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
+golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
-golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
+golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
+golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210429154555-c04ba851c2a4/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28=
+golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
+golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
+golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180318012157-96caea41033d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
-golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
+golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
-google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
-google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
-google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
+google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
+google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08=
+google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
+google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
+google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
+google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
+google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
+google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
+google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
+google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
+google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230628200519-e449d1ea0e82 h1:Wdfp5Hc1bqGCWYZNrir4A1Jb+SmVaV2j1DL/pbMMTGI=
+google.golang.org/genproto v0.0.0-20230628200519-e449d1ea0e82/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
+google.golang.org/genproto/googleapis/api v0.0.0-20230628200519-e449d1ea0e82 h1:iI5Fmsfz4zDINYxJLxn2YChI//ypkHM/KuVSvlN7ZXk=
+google.golang.org/genproto/googleapis/api v0.0.0-20230628200519-e449d1ea0e82/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230628200519-e449d1ea0e82 h1:6b+zGQBiXFlAMpQr+cCarAdrZD4QgXSG7uUZadYysgg=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230628200519-e449d1ea0e82/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
+google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ=
+google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
-google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/abiosoft/ishell.v2 v2.0.0/go.mod h1:sFp+cGtH6o4s1FtpVPTMcHq2yue+c4DGOVohJCPUzwY=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
@@ -445,7 +1672,17 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w=
-lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
+lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
diff --git a/help.mk b/help.mk
index a2ac989dc..c065ec862 100644
--- a/help.mk
+++ b/help.mk
@@ -8,4 +8,4 @@ help:
@echo ''
@echo ' Targets:'
@echo ''
- @awk '/^#/{ comment = substr($$0,3) } /^[a-zA-Z][a-zA-Z0-9_-]+:/{ print " ", $$1, comment; comment = "" }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq
+ @awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9_-]+ ?:/{ print " ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq
diff --git a/internal/ape/util.go b/internal/ape/util.go
deleted file mode 100644
index 99eba95ba..000000000
--- a/internal/ape/util.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package ape
-
-import "regexp"
-
-var (
- SubjectNameRegexp = regexp.MustCompile(`^[\w+=,.@-]{1,64}$`)
- GroupNameRegexp = regexp.MustCompile(`^[\w+=,.@-]{1,128}$`)
-
- // NamespaceNameRegexp similar to https://git.frostfs.info/TrueCloudLab/frostfs-contract/src/commit/f2a82aa635aa57d9b05092d8cf15b170b53cc324/nns/nns_contract.go#L690
- NamespaceNameRegexp = regexp.MustCompile(`(^$)|(^[a-z0-9]{1,2}$)|(^[a-z0-9][a-z0-9-]{1,48}[a-z0-9]$)`)
-)
diff --git a/internal/assert/cond.go b/internal/assert/cond.go
deleted file mode 100644
index 113d2eba9..000000000
--- a/internal/assert/cond.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package assert
-
-import (
- "fmt"
- "strings"
-)
-
-func True(cond bool, details ...string) {
- if !cond {
- panic(strings.Join(details, " "))
- }
-}
-
-func False(cond bool, details ...string) {
- if cond {
- panic(strings.Join(details, " "))
- }
-}
-
-func NoError(err error, details ...string) {
- if err != nil {
- content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " "))
- panic(content)
- }
-}
-
-func Fail(details ...string) {
- panic(strings.Join(details, " "))
-}
diff --git a/internal/audit/consts.go b/internal/audit/consts.go
deleted file mode 100644
index f4fa19ab9..000000000
--- a/internal/audit/consts.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package audit
-
-const (
- InvalidValue = "invalid_value"
- NotDefined = "not_defined"
- Empty = "empty"
-)
diff --git a/internal/audit/request.go b/internal/audit/request.go
deleted file mode 100644
index 17666ab4b..000000000
--- a/internal/audit/request.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package audit
-
-import (
- "context"
-
- crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "go.uber.org/zap"
-)
-
-type Request interface {
- GetVerificationHeader() *session.RequestVerificationHeader
-}
-
-type Target interface {
- String() string
-}
-
-func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) {
- var key []byte
- if req != nil {
- key = req.GetVerificationHeader().GetBodySignature().GetKey()
- }
- LogRequestWithKey(ctx, log, operation, key, target, status)
-}
-
-func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, key []byte, target Target, status bool) {
- object, subject := NotDefined, NotDefined
-
- publicKey := crypto.UnmarshalPublicKey(key)
- if publicKey != nil {
- subject = ((*keys.PublicKey)(publicKey)).StringCompressed()
- }
-
- if target != nil {
- object = target.String()
- }
-
- log.Info(ctx, logs.AuditEventLogRecord,
- zap.String("operation", operation),
- zap.String("object", object),
- zap.String("subject", subject),
- zap.Bool("success", status))
-}
diff --git a/internal/audit/target.go b/internal/audit/target.go
deleted file mode 100644
index 2d6881e29..000000000
--- a/internal/audit/target.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package audit
-
-import (
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type ModelType[T any] interface {
- ReadFromV2(m T) error
- String() string
-}
-
-func TargetFromRef[T any](ref *T, model ModelType[T]) Target {
- if ref == nil {
- return stringTarget{s: NotDefined}
- }
- if err := model.ReadFromV2(*ref); err != nil {
- return stringTarget{s: InvalidValue}
- }
- return stringTarget{s: model.String()}
-}
-
-func TargetFromRefs[T any](refs []*T, model ModelType[T]) Target {
- if len(refs) == 0 {
- return stringTarget{s: NotDefined}
- }
- sb := &strings.Builder{}
- for idx, ref := range refs {
- if idx > 0 {
- sb.WriteString(";")
- }
- if ref == nil {
- sb.WriteString(Empty)
- continue
- }
- if err := model.ReadFromV2(*ref); err != nil {
- sb.WriteString(InvalidValue)
- } else {
- sb.WriteString(model.String())
- }
- }
- return sb
-}
-
-type stringTarget struct {
- s string
-}
-
-func (t stringTarget) String() string {
- return t.s
-}
-
-func TargetFromString(s string) Target {
- if len(s) == 0 {
- s = Empty
- }
- return stringTarget{s: s}
-}
-
-func TargetFromChainID(chainTargetType, chainTargetName string, chainID []byte) Target {
- if len(chainTargetType) == 0 && len(chainTargetName) == 0 && len(chainID) == 0 {
- return stringTarget{s: NotDefined}
- }
- t, n, c := Empty, Empty, Empty
- if len(chainTargetType) > 0 {
- t = chainTargetType
- }
- if len(chainTargetName) > 0 {
- n = chainTargetName
- }
- if len(chainID) > 0 {
- c = string(chainID)
- }
- return stringTarget{s: t + ":" + n + ":" + c}
-}
-
-func TargetFromContainerIDObjectID(containerID *refs.ContainerID, objectID *refs.ObjectID) Target {
- if containerID == nil && objectID == nil {
- return stringTarget{s: NotDefined}
- }
- c, o := Empty, Empty
- if containerID != nil {
- var cnr cid.ID
- if err := cnr.ReadFromV2(*containerID); err != nil {
- c = InvalidValue
- } else {
- c = cnr.EncodeToString()
- }
- }
- if objectID != nil {
- var obj oid.ID
- if err := obj.ReadFromV2(*objectID); err != nil {
- o = InvalidValue
- } else {
- o = obj.EncodeToString()
- }
- }
- return stringTarget{s: c + "/" + o}
-}
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 626372f43..56312f83f 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -17,505 +17,500 @@ const (
)
const (
- InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain"
- InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain"
- InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
- InnerringCantGetInnerRingIndex = "can't get inner ring index"
- InnerringCantGetInnerRingSize = "can't get inner ring size"
- InnerringCantGetAlphabetIndex = "can't get alphabet index"
- InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range"
- InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list"
- InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract"
- InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number"
- InnerringNotarySupport = "notary support"
- InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled"
- InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled"
- InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
- InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global"
- InnerringCantVoteForPreparedValidators = "can't vote for prepared validators"
- InnerringNewBlock = "new block"
- InnerringCantUpdatePersistentState = "can't update persistent state"
- InnerringCloserError = "closer error"
- InnerringReadConfigFromBlockchain = "read config from blockchain"
- PolicerCouldNotGetContainer = "could not get container"
- PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
- PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
- PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object"
- PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected"
- PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance"
- PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK"
- PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected"
- PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy"
- PolicerRoutineStopped = "routine stopped"
- PolicerFailureAtObjectSelectForReplication = "failure at object select for replication"
- PolicerPoolSubmission = "pool submission"
- PolicerUnableToProcessObj = "unable to process object"
- ReplicatorFinishWork = "finish work"
- ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage"
- ReplicatorCouldNotReplicateObject = "could not replicate object"
- ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
- TreeRedirectingTreeServiceQuery = "redirecting tree service query"
- TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
- TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
- TreeSynchronizeTree = "synchronize tree"
- TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes"
- TreeFailedToRunTreeSynchronizationForSpecificNode = "failed to run tree synchronization for specific node"
- TreeFailedToParseAddressForTreeSynchronization = "failed to parse address for tree synchronization"
- TreeFailedToConnectForTreeSynchronization = "failed to connect for tree synchronization"
- TreeSyncingTrees = "syncing trees..."
- TreeCouldNotFetchContainers = "could not fetch containers"
- TreeTreesHaveBeenSynchronized = "trees have been synchronized"
- TreeSyncingContainerTrees = "syncing container trees..."
- TreeCouldNotSyncTrees = "could not sync trees"
- TreeContainerTreesHaveBeenSynced = "container trees have been synced"
- TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization"
- TreeRemovingRedundantTrees = "removing redundant trees..."
- TreeCouldNotCheckIfContainerExisted = "could not check if the container ever existed"
- TreeCouldNotRemoveRedundantTree = "could not remove redundant tree"
- TreeCouldNotCalculateContainerNodes = "could not calculate container nodes"
- TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation"
- TreeDoNotSendUpdateToTheNode = "do not send update to the node"
- TreeFailedToSentUpdateToTheNode = "failed to sent update to the node"
- TreeErrorDuringReplication = "error during replication"
- PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage"
- PersistentCouldNotDeleteSToken = "could not delete token"
- PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens"
- TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source"
- DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY"
- DeleteAssemblingChain = "assembling chain..."
- DeleteCollectingChildren = "collecting children..."
- DeleteSupplementBySplitID = "supplement by split ID"
- DeleteFormingTombstoneStructure = "forming tombstone structure..."
- DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..."
- DeleteFormingSplitInfo = "forming split info..."
- DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..."
- DeleteMembersSuccessfullyCollected = "members successfully collected"
- DeleteECObjectReceived = "erasure-coded object received, form tombstone"
- GetRemoteCallFailed = "remote call failed"
- GetCanNotAssembleTheObject = "can not assemble the object"
- GetTryingToAssembleTheObject = "trying to assemble the object..."
- GetTryingToAssembleTheECObject = "trying to assemble the ec object..."
- GetAssemblingSplittedObject = "assembling splitted object..."
- GetAssemblingECObject = "assembling erasure-coded object..."
- GetUnableToGetAllPartsECObject = "unable to get all parts, continue to reconstruct with existed"
- GetUnableToGetPartECObject = "unable to get part of the erasure-encoded object"
- GetUnableToHeadPartECObject = "unable to head part of the erasure-encoded object"
- GetUnableToHeadPartsECObject = "unable to head parts of the erasure-encoded object"
- GetAssemblingSplittedObjectCompleted = "assembling splitted object completed"
- GetAssemblingECObjectCompleted = "assembling erasure-coded object completed"
- GetFailedToAssembleSplittedObject = "failed to assemble splitted object"
- GetFailedToAssembleECObject = "failed to assemble erasure-coded object"
- GetCouldNotGenerateContainerTraverser = "could not generate container traverser"
- GetCouldNotConstructRemoteNodeClient = "could not construct remote node client"
- GetCouldNotWriteHeader = "could not write header"
- GetCouldNotWritePayloadChunk = "could not write payload chunk"
- GetLocalGetFailed = "local get failed"
- GetReturnResultDirectly = "return result directly"
- GetCompletingTheOperation = "completing the operation"
- GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed"
- GetRequestedObjectIsVirtual = "requested object is virtual"
- GetRequestedObjectIsEC = "requested object is erasure-coded"
- GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds"
- GetUnexpectedECObject = "failed to get EC object from node: expected EC info, but got full object"
- PutAdditionalContainerBroadcastFailure = "additional container broadcast failure"
- SearchReturnResultDirectly = "return result directly"
- SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client"
- SearchRemoteOperationFailed = "remote operation failed"
- SearchCouldNotGenerateContainerTraverser = "could not generate container traverser"
- SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers"
- SearchLocalOperationFailed = "local operation failed"
- UtilObjectServiceError = "object service error"
- V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring"
- V2CantCheckIfRequestFromContainerNode = "can't check if request from container node"
- ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch"
- ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch"
- ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch"
- ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node"
- ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established"
- ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node"
- ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC"
- ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
- ClientNotaryDepositInvoke = "notary deposit invoke"
- ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked"
- ClientNotaryRequestInvoked = "notary request invoked"
- ClientNotaryDepositTransactionWasSuccessfullyPersisted = "notary deposit transaction was successfully persisted"
- ClientNeoClientInvoke = "neo client invoke"
- ClientNativeGasTransferInvoke = "native gas transfer invoke"
- ClientBatchGasTransferInvoke = "batch gas transfer invoke"
- ClientCantGetBlockchainHeight = "can't get blockchain height"
- ClientCantGetBlockchainHeight243 = "can't get blockchain height"
- EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
- EventStopEventListenerByError = "stop event listener by error"
- EventStopEventListenerByContext = "stop event listener by context"
- EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
- EventNilNotificationEventWasCaught = "nil notification event was caught"
- EventStopEventListenerByNotaryChannel = "stop event listener by notary channel"
- EventNilNotaryEventWasCaught = "nil notary event was caught"
- EventStopEventListenerByBlockChannel = "stop event listener by block channel"
- EventNilBlockWasCaught = "nil block was caught"
- EventListenerWorkerPoolDrained = "listener worker pool drained"
- EventEventParserNotSet = "event parser not set"
- EventCouldNotParseNotificationEvent = "could not parse notification event"
- EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered"
- EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event"
- EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event"
- EventNotaryParserNotSet = "notary parser not set"
- EventCouldNotParseNotaryEvent = "could not parse notary event"
- EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
- EventRegisteredNewEventParser = "registered new event parser"
- EventRegisteredNewEventHandler = "registered new event handler"
- EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
- StorageOperation = "local object storage operation"
- BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
- BlobovniczaOpeningBoltDB = "opening BoltDB"
- BlobovniczaInitializing = "initializing..."
- BlobovniczaAlreadyInitialized = "already initialized"
- BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range"
- BlobovniczaClosingBoltDB = "closing BoltDB"
- BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket"
- BlobstorOpening = "opening..."
- BlobstorInitializing = "initializing..."
- BlobstorClosing = "closing..."
- BlobstorCouldntCloseStorage = "couldn't close storage"
- BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking"
- BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration"
- EngineShardHasBeenRemoved = "shard has been removed"
- EngineCouldNotCloseRemovedShard = "could not close removed shard"
- EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping"
- EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard"
- EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping"
- EngineCouldNotCloseShard = "could not close shard"
- EngineCouldNotReloadAShard = "could not reload a shard"
- EngineAddedNewShard = "added new shard"
- EngineCouldNotPutObjectToShard = "could not put object to shard"
- EngineCouldNotCheckObjectExistence = "could not check object existence when put object to shard"
- EngineErrorDuringSearchingForObjectChildren = "error during searching for object children"
- EngineCouldNotInhumeObjectInShard = "could not inhume object in shard"
- EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies"
- EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine"
- EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies"
- EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check"
- EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks"
- EngineInterruptGettingLockers = "can't get object's lockers"
- EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks"
- EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones"
- EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only"
- EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode"
- EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold"
- EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold"
- EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request"
- EngineStartedShardsEvacuation = "started shards evacuation"
- EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully"
- EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error"
- EngineObjectIsMovedToAnotherShard = "object is moved to another shard"
- MetabaseCreatedDirectoryForMetabase = "created directory for Metabase"
- MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase"
- MetabaseCheckingMetabaseVersion = "checking metabase version"
- ShardCantSelectAllObjects = "can't select all objects"
- ShardSettingShardMode = "setting shard mode"
- ShardShardModeSetSuccessfully = "shard mode set successfully"
- ShardFetchingObjectWithoutMeta = "fetching object without meta"
- ShardObjectIsMissingInWritecache = "object is missing in write-cache"
- ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache"
- ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor"
- ShardMetaObjectCounterRead = "meta: object counter read"
- ShardMetaCantReadContainerList = "meta: can't read container list"
- ShardMetaCantReadContainerSize = "meta: can't read container size"
- ShardMetaInfoPresentButObjectNotFound = "meta info was present, but the object is missing"
- ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode"
- ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode"
- ShardCouldNotUnmarshalObject = "could not unmarshal object"
- ShardSkipObjectFromResyncContainerDeleted = "object skipped from metabase resync: container deleted"
- ShardCouldNotCloseShardComponent = "could not close shard component"
- ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode"
- ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
- ShardStopEventListenerByClosedEventChannel = "stop event listener by closed `event` channel"
- ShardStopEventListenerByClosedStopChannel = "stop event listener by closed `stop` channel"
- ShardEventProcessingInProgress = "event processing is in progress, skip the received"
- ShardStopEventListenerByContext = "stop event listener by context"
- ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool"
- ShardGCIsStopped = "GC is stopped"
- ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..."
- ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed"
- ShardCouldNotDeleteTheObjects = "could not delete the objects"
- ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed"
- ShardCouldNotInhumeTheObjects = "could not inhume the objects"
- ShardStartedExpiredTombstonesHandling = "started expired tombstones handling"
- ShardIteratingTombstones = "iterating tombstones"
- ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones"
- ShardIteratorOverGraveyardFailed = "iterator over graveyard failed"
- ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch"
- ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling"
- ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed"
- ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage"
- ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records"
- ShardFailureToUnlockObjects = "failure to unlock objects"
- ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
- ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
- ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
- ShardCouldNotFindObject = "could not find object"
- WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
- WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
- BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
- BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
- BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level"
- BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza"
- BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed"
- BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza"
- BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's"
- BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..."
- BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..."
- AlphabetTick = "tick"
- AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained"
- AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event"
- AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event"
- AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method"
- AlphabetStorageNodeEmissionIsOff = "storage node emission is off"
- AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes"
- AlphabetGasEmission = "gas emission"
- AlphabetCantParseNodePublicKey = "can't parse node public key"
- AlphabetCantTransferGas = "can't transfer gas"
- AlphabetCantTransferGasToWallet = "can't transfer gas to wallet"
- AlphabetAlphabetWorkerPool = "alphabet worker pool"
- BalanceBalanceWorkerPoolDrained = "balance worker pool drained"
- BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock"
- BalanceCantSendLockAssetTx = "can't send lock asset tx"
- BalanceBalanceWorkerPool = "balance worker pool"
- ContainerContainerWorkerPool = "container worker pool"
- ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained"
- ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put"
- ContainerPutContainerCheckFailed = "put container check failed"
- ContainerCouldNotApprovePutContainer = "could not approve put container"
- ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete"
- ContainerDeleteContainerCheckFailed = "delete container check failed"
- ContainerCouldNotApproveDeleteContainer = "could not approve delete container"
- FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
- FrostFSCantRelaySetConfigEvent = "can't relay set config event"
- FrostFSFrostfsWorkerPool = "frostfs worker pool"
- FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained"
- FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit"
- FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract"
- FrostFSDoubleMintEmissionDeclined = "double mint emission declined"
- FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node"
- FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached"
- FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver"
- FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw"
- FrostFSCantCreateLockAccount = "can't create lock account"
- FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw"
- FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque"
- FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract"
- GovernanceNewEvent = "new event"
- GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained"
- GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync"
- GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net"
- GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain"
- GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain"
- GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed"
- GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update"
- GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee"
- GovernanceFinishedAlphabetListUpdate = "finished alphabet list update"
- GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain"
- GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys"
- GovernanceUpdateOfTheInnerRingList = "update of the inner ring list"
- GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys"
- GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain"
- GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract"
- NetmapNetmapWorkerPool = "netmap worker pool"
- NetmapTick = "tick"
- NetmapNetmapWorkerPoolDrained = "netmap worker pool drained"
- NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled"
- NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick"
- NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node"
- NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap"
- NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState"
- NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache"
- NetmapCantGetEpochDuration = "can't get epoch duration"
- NetmapCantGetTransactionHeight = "can't get transaction height"
- NetmapCantResetEpochTimer = "can't reset epoch timer"
- NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup"
- NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick"
- NetmapNextEpoch = "next epoch"
- NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch"
- NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification"
- NetmapNonhaltNotaryTransaction = "non-halt notary transaction"
- NetmapCantParseNetworkMapCandidate = "can't parse network map candidate"
- NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate"
- NetmapApprovingNetworkMapCandidate = "approving network map candidate"
- NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer"
- NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification"
- NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state"
- NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer"
- FrostFSIRInternalError = "internal error"
- FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server"
- FrostFSIRApplicationStopped = "application stopped"
- FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint"
- FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint"
- FrostFSIRReloadExtraWallets = "reload extra wallets"
- FrostFSNodeStartListeningEndpoint = "start listening endpoint"
- FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file"
- FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint"
- FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint"
- FrostFSNodeStoppingGRPCServer = "stopping gRPC server..."
- FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop"
- FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully"
- FrostFSNodeGRPCServerError = "gRPC server error"
- FrostFSNodeGRPCReconnecting = "reconnecting gRPC server..."
- FrostFSNodeGRPCReconnectedSuccessfully = "gRPC server reconnected successfully"
- FrostFSNodeGRPCServerConfigNotFound = "gRPC server config not found"
- FrostFSNodeGRPCReconnectFailed = "failed to reconnect gRPC server"
- FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop"
- FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance"
- FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance"
- FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine"
- FrostFSNodeShardAttachedToEngine = "shard attached to engine"
- FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..."
- FrostFSNodeAccessPolicyEngineClosingFailure = "ape closing failure"
- FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure"
- FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed = "persistent rule storage db path is not set: in-memory will be used"
- FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully"
- FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state"
- FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state"
- FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..."
- FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete"
- FrostFSNodeInternalApplicationError = "internal application error"
- FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete"
- FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..."
- FrostFSNodeSIGHUPSkip = "node is not ready for reconfiguration, skipped SIGHUP"
- FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown"
- FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing"
- FrostFSNodeConfigurationReading = "configuration reading"
- FrostFSNodeTracingConfigationUpdated = "tracing configation updated"
- FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
- FrostFSNodePoolConfigurationUpdate = "adjust pool configuration"
- FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying"
- FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully"
- FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
- FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
- FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
- FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
- FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
- FrostFSNodeFailedInitTracing = "failed init tracing"
- FrostFSNodeFailedShutdownTracing = "failed shutdown tracing"
- FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client"
- FrostFSNodeClosingMorphComponents = "closing morph components..."
- FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global"
- FrostFSNodeNotarySupport = "notary support"
- FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network"
- FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
- FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
- FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain"
- FrostFSNodeNewBlock = "new block"
- FrostFSNodeCantUpdatePersistentState = "can't update persistent state"
- FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx"
- FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch"
- FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit"
- FrostFSNodeInitialNetworkState = "initial network state"
- FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization"
- FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service"
- FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container"
- FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed"
- FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)"
- FrostFSNodePolicerIsDisabled = "policer is disabled"
- CommonApplicationStarted = "application started"
- ShardGCCollectingExpiredObjectsStarted = "collecting expired objects started"
- ShardGCCollectingExpiredObjectsCompleted = "collecting expired objects completed"
- ShardGCCollectingExpiredLocksStarted = "collecting expired locks started"
- ShardGCCollectingExpiredLocksCompleted = "collecting expired locks completed"
- ShardGCRemoveGarbageStarted = "garbage remove started"
- ShardGCRemoveGarbageCompleted = "garbage remove completed"
- EngineShardsEvacuationFailedToCount = "failed to get total objects count to evacuate"
- EngineShardsEvacuationFailedToListObjects = "failed to list objects to evacuate"
- EngineShardsEvacuationFailedToReadObject = "failed to read object to evacuate"
- EngineShardsEvacuationFailedToMoveObject = "failed to evacuate object to other node"
- ShardGCFailedToGetExpiredWithLinked = "failed to get expired objects with linked"
- FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap = "the node is under maintenance, skip initial bootstrap"
- EngineCouldNotChangeShardModeToDisabled = "could not change shard mode to disabled"
- RPConnectionLost = "RPC connection lost, attempting reconnect"
- RPCNodeSwitchFailure = "can't switch RPC node"
- FSTreeCantUnmarshalObject = "can't unmarshal an object"
- FSTreeCantFushObjectBlobstor = "can't flush an object to blobstor"
- FSTreeCantUpdateID = "can't update object storage ID"
- PutSingleRedirectFailure = "failed to redirect PutSingle request"
- StorageIDRetrievalFailure = "can't get storage ID from metabase"
- ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
- CandidateStatusPriority = "candidate status is different from the netmap status, the former takes priority"
- TombstoneExpirationParseFailure = "tombstone getter: could not parse tombstone expiration epoch"
- RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
- RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
- AttemtToCloseAlreadyClosedBlobovnicza = "attempt to close an already closed blobovnicza"
- FailedToGetContainerCounters = "failed to get container counters values"
- FailedToRebuildBlobstore = "failed to rebuild blobstore"
- BlobstoreRebuildStarted = "blobstore rebuild started"
- BlobstoreRebuildCompletedSuccessfully = "blobstore rebuild completed successfully"
- BlobstoreRebuildStopped = "blobstore rebuild stopped"
- BlobstorRebuildFailedToRebuildStorages = "failed to rebuild storages"
- BlobstorRebuildRebuildStoragesCompleted = "storages rebuild completed"
- BlobovniczaTreeCollectingDBToRebuild = "collecting blobovniczas to rebuild..."
- BlobovniczaTreeCollectingDBToRebuildFailed = "collecting blobovniczas to rebuild failed"
- BlobovniczaTreeCollectingDBToRebuildSuccess = "collecting blobovniczas to rebuild completed successfully"
- BlobovniczaTreeRebuildingBlobovnicza = "rebuilding blobovnicza..."
- BlobovniczaTreeRebuildingBlobovniczaFailed = "rebuilding blobovnicza failed"
- BlobovniczaTreeRebuildingBlobovniczaSuccess = "rebuilding blobovnicza completed successfully"
- BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza = "could not put move info to source blobovnicza"
- BlobovniczatreeCouldNotUpdateStorageID = "could not update storage ID"
- BlobovniczatreeCouldNotDropMoveInfo = "could not drop move info from source blobovnicza"
- BlobovniczatreeCouldNotDeleteFromSource = "could not delete object from source blobovnicza"
- BlobovniczaTreeCompletingPreviousRebuild = "completing previous rebuild if failed..."
- BlobovniczaTreeCompletedPreviousRebuildSuccess = "previous rebuild completed successfully"
- BlobovniczaTreeCompletedPreviousRebuildFailed = "failed to complete previous rebuild"
- BlobovniczatreeCouldNotCheckExistenceInTargetDB = "could not check object existence in target blobovnicza"
- BlobovniczatreeCouldNotPutObjectToTargetDB = "could not put object to target blobovnicza"
- BlobovniczaSavingCountersToMeta = "saving counters to blobovnicza's meta..."
- BlobovniczaSavingCountersToMetaSuccess = "saving counters to blobovnicza's meta completed successfully"
- BlobovniczaSavingCountersToMetaFailed = "saving counters to blobovnicza's meta failed"
- ObjectRemovalFailureExistsInWritecache = "can't remove object: object must be flushed from writecache"
- FailedToReportStatusToSystemd = "failed to report status to systemd"
- ShardGCCollectingExpiredMetricsStarted = "collecting expired metrics started"
- ShardGCCollectingExpiredMetricsCompleted = "collecting expired metrics completed"
- ShardGCFailedToCollectZeroSizeContainers = "failed to collect zero-size containers"
- ShardGCFailedToCollectZeroCountContainers = "failed to collect zero-count containers"
- EngineFailedToCheckContainerAvailability = "failed to check container availability"
- EngineFailedToGetContainerSize = "failed to get container size"
- EngineFailedToDeleteContainerSize = "failed to delete container size"
- EngineInterruptProcessingZeroSizeContainers = "interrupt processing zero-size containers"
- EngineInterruptProcessingZeroCountContainers = "interrupt processing zero-count containers"
- EngineFailedToGetContainerCounters = "failed to get container counters"
- GetSvcV2FailedToParseNodeEndpoints = "failed to parse node endpoints"
- GetSvcV2FailedToParseNodeExternalAddresses = "failed to parse node external addresses"
- GetSvcV2FailedToGetRangeHashFromNode = "failed to get range hash from node"
- GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes = "failed to get range hash from all of container nodes"
- FailedToUpdateShardID = "failed to update shard id"
- EngineShardsEvacuationFailedToMoveTree = "failed to evacuate tree to other node"
- EngineShardsEvacuationTreeEvacuatedLocal = "tree evacuated to local node"
- EngineShardsEvacuationTreeEvacuatedRemote = "tree evacuated to other node"
- EngineRefillFailedToGetObjectsCount = "failed to get blobstor objects count, no resync percent estimation is available"
- ECFailedToSendToContainerNode = "failed to send EC object to container node"
- ECFailedToSaveECPart = "failed to save EC part"
- PolicerNodeIsNotECObjectNode = "current node is not EC object node"
- PolicerFailedToGetLocalECChunks = "failed to get local EC chunks"
- PolicerMissingECChunk = "failed to find EC chunk on any of the nodes"
- PolicerFailedToDecodeECChunkID = "failed to decode EC chunk ID"
- PolicerDifferentObjectIDForTheSameECChunk = "different object IDs for the same EC chunk"
- ReplicatorCouldNotGetObjectFromRemoteStorage = "could not get object from remote storage"
- ReplicatorCouldNotPutObjectToLocalStorage = "could not put object to local storage"
- PolicerCouldNotGetObjectFromNodeMoving = "could not get EC object from the node, moving current chunk to the node"
- PolicerCouldNotRestoreObjectNotEnoughChunks = "could not restore EC object: not enough chunks"
- PolicerFailedToRestoreObject = "failed to restore EC object"
- PolicerCouldNotGetChunk = "could not get EC chunk"
- PolicerCouldNotGetChunks = "could not get EC chunks"
- AuditEventLogRecord = "audit event log record"
- StartedWritecacheSealAsync = "started writecache seal async"
- WritecacheSealCompletedAsync = "writecache seal completed successfully"
- FailedToSealWritecacheAsync = "failed to seal writecache async"
- WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
- BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
- WritecacheCantGetObject = "can't get an object from fstree"
- FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
- FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
- NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
- FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
- FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
- WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
- FailedToUpdateNetmapCandidates = "update netmap candidates failed"
- UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used"
+ InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations"
+ InnerringCantStopEpochEstimation = "can't stop epoch estimation"
+ InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain"
+ InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain"
+ InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
+ InnerringCantGetInnerRingIndex = "can't get inner ring index"
+ InnerringCantGetInnerRingSize = "can't get inner ring size"
+ InnerringCantGetAlphabetIndex = "can't get alphabet index"
+ InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range"
+ InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list"
+ InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract"
+ InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number"
+ InnerringNotarySupport = "notary support"
+ InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled"
+ InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled"
+ InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
+ InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global"
+ InnerringCantVoteForPreparedValidators = "can't vote for prepared validators"
+ InnerringNewBlock = "new block"
+ InnerringCantUpdatePersistentState = "can't update persistent state"
+ InnerringCloserError = "closer error"
+ InnerringReadConfigFromBlockchain = "read config from blockchain"
+ NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications"
+ NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification"
+ PolicerCouldNotGetContainer = "could not get container"
+ PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
+ PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
+ PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object"
+ PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected"
+ PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance"
+ PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK"
+ PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected"
+ PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy"
+ PolicerRoutineStopped = "routine stopped"
+ PolicerFailureAtObjectSelectForReplication = "failure at object select for replication"
+ PolicerPoolSubmission = "pool submission"
+ PolicerUnableToProcessObj = "unable to process object"
+ ReplicatorFinishWork = "finish work"
+ ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage"
+ ReplicatorCouldNotReplicateObject = "could not replicate object"
+ ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
+ TreeRedirectingTreeServiceQuery = "redirecting tree service query"
+ TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL"
+ TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
+ TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
+ TreeSynchronizeTree = "synchronize tree"
+ TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes"
+ TreeSyncingTrees = "syncing trees..."
+ TreeCouldNotFetchContainers = "could not fetch containers"
+ TreeTreesHaveBeenSynchronized = "trees have been synchronized"
+ TreeSyncingContainerTrees = "syncing container trees..."
+ TreeCouldNotSyncTrees = "could not sync trees"
+ TreeContainerTreesHaveBeenSynced = "container trees have been synced"
+ TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization"
+ TreeRemovingRedundantTrees = "removing redundant trees..."
+ TreeCouldNotCheckIfContainerExisted = "could not check if the container ever existed"
+ TreeCouldNotRemoveRedundantTree = "could not remove redundant tree"
+ TreeCouldNotCalculateContainerNodes = "could not calculate container nodes"
+ TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation"
+ TreeDoNotSendUpdateToTheNode = "do not send update to the node"
+ TreeFailedToSentUpdateToTheNode = "failed to sent update to the node"
+ TreeErrorDuringReplication = "error during replication"
+ PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage"
+ PersistentCouldNotDeleteSToken = "could not delete token"
+ PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens"
+ ControllerReportIsAlreadyStarted = "report is already started"
+ TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source"
+ DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY"
+ DeleteAssemblingChain = "assembling chain..."
+ DeleteCollectingChildren = "collecting children..."
+ DeleteSupplementBySplitID = "supplement by split ID"
+ DeleteFormingTombstoneStructure = "forming tombstone structure..."
+ DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..."
+ DeleteFormingSplitInfo = "forming split info..."
+ DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..."
+ DeleteMembersSuccessfullyCollected = "members successfully collected"
+ GetRemoteCallFailed = "remote call failed"
+ GetCanNotAssembleTheObject = "can not assemble the object"
+ GetTryingToAssembleTheObject = "trying to assemble the object..."
+ GetAssemblingSplittedObject = "assembling splitted object..."
+ GetAssemblingSplittedObjectCompleted = "assembling splitted object completed"
+ GetFailedToAssembleSplittedObject = "failed to assemble splitted object"
+ GetCouldNotGenerateContainerTraverser = "could not generate container traverser"
+ GetCouldNotConstructRemoteNodeClient = "could not construct remote node client"
+ GetCouldNotWriteHeader = "could not write header"
+ GetCouldNotWritePayloadChunk = "could not write payload chunk"
+ GetLocalGetFailed = "local get failed"
+ GetReturnResultDirectly = "return result directly"
+ GetCompletingTheOperation = "completing the operation"
+ GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed"
+ GetRequestedObjectIsVirtual = "requested object is virtual"
+ GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds"
+ PutAdditionalContainerBroadcastFailure = "additional container broadcast failure"
+ SearchReturnResultDirectly = "return result directly"
+ SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client"
+ SearchRemoteOperationFailed = "remote operation failed"
+ SearchCouldNotGenerateContainerTraverser = "could not generate container traverser"
+ SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers"
+ SearchLocalOperationFailed = "local operation failed"
+ UtilObjectServiceError = "object service error"
+ UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool"
+ V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring"
+ V2CantCheckIfRequestFromContainerNode = "can't check if request from container node"
+ NatsNatsConnectionWasLost = "nats: connection was lost"
+ NatsNatsReconnectedToTheServer = "nats: reconnected to the server"
+ NatsNatsClosingConnectionAsTheContextIsDone = "nats: closing connection as the context is done"
+ NatsConnectedToEndpoint = "nats: successfully connected to endpoint"
+ ControllerStartingToAnnounceTheValuesOfTheMetrics = "starting to announce the values of the metrics"
+ ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics = "could not initialize iterator over locally collected metrics"
+ ControllerCouldNotInitializeAnnouncementAccumulator = "could not initialize announcement accumulator"
+ ControllerIteratorOverLocallyCollectedMetricsAborted = "iterator over locally collected metrics aborted"
+ ControllerCouldNotFinishWritingLocalAnnouncements = "could not finish writing local announcements"
+ ControllerTrustAnnouncementSuccessfullyFinished = "trust announcement successfully finished"
+ ControllerAnnouncementIsAlreadyStarted = "announcement is already started"
+ ControllerAnnouncementSuccessfullyInterrupted = "announcement successfully interrupted"
+ ControllerAnnouncementIsNotStartedOrAlreadyInterrupted = "announcement is not started or already interrupted"
+ ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements = "could not initialize iterator over locally accumulated announcements"
+ ControllerCouldNotInitializeResultTarget = "could not initialize result target"
+ ControllerIteratorOverLocalAnnouncementsAborted = "iterator over local announcements aborted"
+ ControllerCouldNotFinishWritingLoadEstimations = "could not finish writing load estimations"
+ RouteCouldNotInitializeWriterProvider = "could not initialize writer provider"
+ RouteCouldNotInitializeWriter = "could not initialize writer"
+ RouteCouldNotPutTheValue = "could not put the value"
+ RouteCouldNotCloseRemoteServerWriter = "could not close remote server writer"
+ ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch"
+ ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch"
+ ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch"
+ ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node"
+ ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established"
+ ClientSwitchingToTheNextRPCNode = "switching to the next RPC node"
+ ClientCouldNotEstablishConnectionToAnyRPCNode = "could not establish connection to any RPC node"
+ ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node"
+ ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC"
+ ClientCouldNotRestoreSideChainSubscriptionsUsingNode = "could not restore side chain subscriptions using node"
+ ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
+ ClientNotaryDepositInvoke = "notary deposit invoke"
+ ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked"
+ ClientNotaryRequestInvoked = "notary request invoked"
+ ClientNeoClientInvoke = "neo client invoke"
+ ClientNativeGasTransferInvoke = "native gas transfer invoke"
+ ClientBatchGasTransferInvoke = "batch gas transfer invoke"
+ ClientCantGetBlockchainHeight = "can't get blockchain height"
+ ClientCantGetBlockchainHeight243 = "can't get blockchain height"
+ EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
+ EventCouldNotStartListenToEvents = "could not start listen to events"
+ EventStopEventListenerByError = "stop event listener by error"
+ EventStopEventListenerByContext = "stop event listener by context"
+ EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
+ EventNilNotificationEventWasCaught = "nil notification event was caught"
+ EventStopEventListenerByNotaryChannel = "stop event listener by notary channel"
+ EventNilNotaryEventWasCaught = "nil notary event was caught"
+ EventStopEventListenerByBlockChannel = "stop event listener by block channel"
+ EventNilBlockWasCaught = "nil block was caught"
+ EventListenerWorkerPoolDrained = "listener worker pool drained"
+ EventEventParserNotSet = "event parser not set"
+ EventCouldNotParseNotificationEvent = "could not parse notification event"
+ EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered"
+ EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event"
+ EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event"
+ EventNotaryParserNotSet = "notary parser not set"
+ EventCouldNotParseNotaryEvent = "could not parse notary event"
+ EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
+ EventIgnoreNilEventParser = "ignore nil event parser"
+ EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser"
+ EventRegisteredNewEventParser = "registered new event parser"
+ EventIgnoreNilEventHandler = "ignore nil event handler"
+ EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser"
+ EventRegisteredNewEventHandler = "registered new event handler"
+ EventIgnoreNilNotaryEventParser = "ignore nil notary event parser"
+ EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser"
+ EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler"
+ EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
+ EventIgnoreNilBlockHandler = "ignore nil block handler"
+ SubscriberRemoteNotificationChannelHasBeenClosed = "remote notification channel has been closed"
+ SubscriberCantCastNotifyEventValueToTheNotifyStruct = "can't cast notify event value to the notify struct"
+ SubscriberNewNotificationEventFromSidechain = "new notification event from sidechain"
+ SubscriberCantCastBlockEventValueToBlock = "can't cast block event value to block"
+ SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct = "can't cast notify event value to the notary request struct"
+ SubscriberUnsupportedNotificationFromTheChain = "unsupported notification from the chain"
+ BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
+ BlobovniczaOpeningBoltDB = "opening BoltDB"
+ BlobovniczaInitializing = "initializing..."
+ BlobovniczaAlreadyInitialized = "already initialized"
+ BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range"
+ BlobovniczaClosingBoltDB = "closing BoltDB"
+ BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket"
+ BlobstorOpening = "opening..."
+ BlobstorInitializing = "initializing..."
+ BlobstorClosing = "closing..."
+ BlobstorCouldntCloseStorage = "couldn't close storage"
+ BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking"
+ BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration"
+ EngineShardHasBeenRemoved = "shard has been removed"
+ EngineCouldNotCloseRemovedShard = "could not close removed shard"
+ EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping"
+ EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard"
+ EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping"
+ EngineCouldNotCloseShard = "could not close shard"
+ EngineCouldNotReloadAShard = "could not reload a shard"
+ EngineAddedNewShard = "added new shard"
+ EngineCouldNotMarkObjectForShardRelocation = "could not mark object for shard relocation"
+ EngineCouldNotPutObjectToShard = "could not put object to shard"
+ EngineErrorDuringSearchingForObjectChildren = "error during searching for object children"
+ EngineCouldNotInhumeObjectInShard = "could not inhume object in shard"
+ EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies"
+ EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine"
+ EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies"
+ EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check"
+ EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks"
+ EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks"
+ EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only"
+ EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode"
+ EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold"
+ EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold"
+ EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request"
+ EngineStartedShardsEvacuation = "started shards evacuation"
+ EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully"
+ EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error"
+ EngineObjectIsMovedToAnotherShard = "object is moved to another shard"
+ MetabaseMissingMatcher = "missing matcher"
+ MetabaseErrorInFKBTSelection = "error in FKBT selection"
+ MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf"
+ MetabaseUnknownOperation = "unknown operation"
+ MetabaseCantIterateOverTheBucket = "can't iterate over the bucket"
+ MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets"
+ MetabaseCreatedDirectoryForMetabase = "created directory for Metabase"
+ MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase"
+ MetabaseCheckingMetabaseVersion = "checking metabase version"
+ ShardCantSelectAllObjects = "can't select all objects"
+ ShardSettingShardMode = "setting shard mode"
+ ShardShardModeSetSuccessfully = "shard mode set successfully"
+ ShardCouldNotMarkObjectForShardRelocationInMetabase = "could not mark object for shard relocation in metabase"
+ ShardCantDeleteObjectFromWriteCache = "can't delete object from write cache"
+ ShardCantGetStorageIDFromMetabase = "can't get storage ID from metabase"
+ ShardCantRemoveObjectFromBlobStor = "can't remove object from blobStor"
+ ShardFetchingObjectWithoutMeta = "fetching object without meta"
+ ShardObjectIsMissingInWritecache = "object is missing in write-cache"
+ ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache"
+ ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor"
+ ShardMetaObjectCounterRead = "meta: object counter read"
+ ShardMetaCantReadContainerList = "meta: can't read container list"
+ ShardMetaCantReadContainerSize = "meta: can't read container size"
+ ShardMetaInfoPresentButObjectNotFound = "meta info was present, but the object is missing"
+ ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode"
+ ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode"
+ ShardCouldNotUnmarshalObject = "could not unmarshal object"
+ ShardCouldNotCloseShardComponent = "could not close shard component"
+ ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode"
+ ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
+ ShardTryingToRestoreReadwriteMode = "trying to restore read-write mode"
+ ShardStopEventListenerByClosedChannel = "stop event listener by closed channel"
+ ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool"
+ ShardGCIsStopped = "GC is stopped"
+ ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..."
+ ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed"
+ ShardCouldNotDeleteTheObjects = "could not delete the objects"
+ ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed"
+ ShardCouldNotInhumeTheObjects = "could not inhume the objects"
+ ShardStartedExpiredTombstonesHandling = "started expired tombstones handling"
+ ShardIteratingTombstones = "iterating tombstones"
+ ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones"
+ ShardIteratorOverGraveyardFailed = "iterator over graveyard failed"
+ ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch"
+ ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling"
+ ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed"
+ ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage"
+ ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records"
+ ShardFailureToUnlockObjects = "failure to unlock objects"
+ ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
+ ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
+ ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
+ WritecacheBadgerInitExperimental = "initializing badger-backed experimental writecache"
+ WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache"
+ WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
+ WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree"
+ WritecacheFinishedUpdatingFSTreeFlushMarks = "finished updating FSTree flush marks"
+ WritecacheFillingFlushMarksForObjectsInDatabase = "filling flush marks for objects in database"
+ WritecacheFinishedUpdatingFlushMarks = "finished updating flush marks"
+ WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database"
+ WritecacheCantParseAddress = "can't parse address"
+ WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
+ WritecacheDBValueLogGCRunCompleted = "value log GC run completed"
+ WritecacheBadgerObjAlreadyScheduled = "object already scheduled for flush"
+ BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
+ BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza = "could not read payload range from opened blobovnicza"
+ BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza = "could not read payload range from active blobovnicza"
+ BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
+ BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict = "blobovnicza successfully closed on evict"
+ BlobovniczatreeUpdatingActiveBlobovnicza = "updating active blobovnicza..."
+ BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated = "active blobovnicza successfully updated"
+ BlobovniczatreeBlobovniczaSuccessfullyActivated = "blobovnicza successfully activated"
+ BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level"
+ BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza = "could not remove object from opened blobovnicza"
+ BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza = "could not remove object from active blobovnicza"
+ BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza"
+ BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed"
+ BlobovniczatreeCouldNotUpdateActiveBlobovnicza = "could not update active blobovnicza"
+ BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza"
+ BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza = "could not read object from opened blobovnicza"
+ BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza = "could not get object from active blobovnicza"
+ BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's"
+ BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..."
+ BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..."
+ BlobovniczatreeCouldNotCloseActiveBlobovnicza = "could not close active blobovnicza"
+ AlphabetTick = "tick"
+ AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained"
+ AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event"
+ AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event"
+ AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method"
+ AlphabetStorageNodeEmissionIsOff = "storage node emission is off"
+ AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes"
+ AlphabetGasEmission = "gas emission"
+ AlphabetCantParseNodePublicKey = "can't parse node public key"
+ AlphabetCantTransferGas = "can't transfer gas"
+ AlphabetCantTransferGasToWallet = "can't transfer gas to wallet"
+ AlphabetAlphabetWorkerPool = "alphabet worker pool"
+ BalanceBalanceWorkerPoolDrained = "balance worker pool drained"
+ BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock"
+ BalanceCantSendLockAssetTx = "can't send lock asset tx"
+ BalanceBalanceWorkerPool = "balance worker pool"
+ ContainerContainerWorkerPool = "container worker pool"
+ ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained"
+ ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put"
+ ContainerPutContainerCheckFailed = "put container check failed"
+ ContainerCouldNotApprovePutContainer = "could not approve put container"
+ ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete"
+ ContainerDeleteContainerCheckFailed = "delete container check failed"
+ ContainerCouldNotApproveDeleteContainer = "could not approve delete container"
+ ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL"
+ ContainerSetEACLCheckFailed = "set EACL check failed"
+ ContainerCouldNotApproveSetEACL = "could not approve set EACL"
+ FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind"
+ FrostFSInvalidManageKeyEvent = "invalid manage key event"
+ FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes"
+ FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
+ FrostFSCantRelaySetConfigEvent = "can't relay set config event"
+ FrostFSFrostfsWorkerPool = "frostfs worker pool"
+ FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained"
+ FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit"
+ FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract"
+ FrostFSDoubleMintEmissionDeclined = "double mint emission declined"
+ FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node"
+ FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached"
+ FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver"
+ FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw"
+ FrostFSCantCreateLockAccount = "can't create lock account"
+ FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw"
+ FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque"
+ FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract"
+ GovernanceNewEvent = "new event"
+ GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained"
+ GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync"
+ GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net"
+ GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain"
+ GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain"
+ GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed"
+ GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update"
+ GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee"
+ GovernanceFinishedAlphabetListUpdate = "finished alphabet list update"
+ GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain"
+ GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys"
+ GovernanceUpdateOfTheInnerRingList = "update of the inner ring list"
+ GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys"
+ GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain"
+ GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract"
+ NetmapNetmapWorkerPool = "netmap worker pool"
+ NetmapTick = "tick"
+ NetmapNetmapWorkerPoolDrained = "netmap worker pool drained"
+ NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled"
+ NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick"
+ NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node"
+ NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap"
+ NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState"
+ NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache"
+ NetmapCantGetEpochDuration = "can't get epoch duration"
+ NetmapCantGetTransactionHeight = "can't get transaction height"
+ NetmapCantResetEpochTimer = "can't reset epoch timer"
+ NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup"
+ NetmapCantStartContainerSizeEstimation = "can't start container size estimation"
+ NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick"
+ NetmapNextEpoch = "next epoch"
+ NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch"
+ NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification"
+ NetmapNonhaltNotaryTransaction = "non-halt notary transaction"
+ NetmapCantParseNetworkMapCandidate = "can't parse network map candidate"
+ NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate"
+ NetmapApprovingNetworkMapCandidate = "approving network map candidate"
+ NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer"
+ NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification"
+ NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state"
+ NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer"
+ FrostFSIRInternalError = "internal error"
+ FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server"
+ FrostFSIRApplicationStopped = "application stopped"
+ FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint"
+ FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint"
+ FrostFSIRReloadExtraWallets = "reload extra wallets"
+ FrostFSNodeStartListeningEndpoint = "start listening endpoint"
+ FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file"
+ FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint"
+ FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint"
+ FrostFSNodeStoppingGRPCServer = "stopping gRPC server..."
+ FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop"
+ FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully"
+ FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop"
+ FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance"
+ FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance"
+ FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine"
+ FrostFSNodeShardAttachedToEngine = "shard attached to engine"
+ FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..."
+ FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure"
+ FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully"
+ FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state"
+ FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state"
+ FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..."
+ FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete"
+ FrostFSNodeInternalApplicationError = "internal application error"
+ FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete"
+ FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..."
+ FrostFSNodeConfigurationReading = "configuration reading"
+ FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation"
+ FrostFSNodeTracingConfigationUpdated = "tracing configuration updated"
+ FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
+ FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying"
+ FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully"
+ FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
+ FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
+ FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
+ FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract"
+ FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine"
+ FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully"
+ FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers"
+ FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container"
+ FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object"
+ FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications"
+ FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification"
+ FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
+ FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
+ FrostFSNodeFailedInitTracing = "failed init tracing"
+ FrostFSNodeFailedShutdownTracing = "failed shutdown tracing"
+ FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client"
+ FrostFSNodeClosingMorphComponents = "closing morph components..."
+ FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global"
+ FrostFSNodeNotarySupport = "notary support"
+ FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network"
+ FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
+ FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
+ FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain"
+ FrostFSNodeNewBlock = "new block"
+ FrostFSNodeCantUpdatePersistentState = "can't update persistent state"
+ FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx"
+ FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch"
+ FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit"
+ FrostFSNodeInitialNetworkState = "initial network state"
+ FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization"
+ FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service"
+ FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container"
+ FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed"
+ FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)"
+ FrostFSNodePolicerIsDisabled = "policer is disabled"
+ CommonApplicationStarted = "application started"
+ ShardGCCollectingExpiredObjectsStarted = "collecting expired objects started"
+ ShardGCCollectingExpiredObjectsCompleted = "collecting expired objects completed"
+ ShardGCCollectingExpiredLocksStarted = "collecting expired locks started"
+ ShardGCCollectingExpiredLocksCompleted = "collecting expired locks completed"
+ ShardGCRemoveGarbageStarted = "garbage remove started"
+ ShardGCRemoveGarbageCompleted = "garbage remove completed"
+ EngineShardsEvacuationFailedToCount = "failed to get total objects count to evacuate"
+ EngineShardsEvacuationFailedToListObjects = "failed to list objects to evacuate"
+ EngineShardsEvacuationFailedToReadObject = "failed to read object to evacuate"
+ EngineShardsEvacuationFailedToMoveObject = "failed to evacuate object to other node"
+ ShardGCFailedToGetExpiredWithLinked = "failed to get expired objects with linked"
+ ShardDeleteCantDeleteFromWriteCache = "can't delete object from write cache"
+ FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap = "the node is under maintenance, skip initial bootstrap"
+ EngineCouldNotChangeShardModeToDisabled = "could not change shard mode to disabled"
+ NetmapNodeAlreadyInCandidateListOnlineSkipInitialBootstrap = "the node is already in candidate list with online state, skip initial bootstrap"
+ RPConnectionLost = "RPC connection lost, attempting reconnect"
+ RPCNodeSwitchFailure = "can't switch RPC node"
+ FSTreeCantReadFile = "can't read a file"
+ FSTreeCantUnmarshalObject = "can't unmarshal an object"
+ FSTreeCantFushObjectBlobstor = "can't flush an object to blobstor"
+ FSTreeCantUpdateID = "can't update object storage ID"
+ FSTreeCantDecodeDBObjectAddress = "can't decode object address from the DB"
+ PutSingleRedirectFailure = "failed to redirect PutSingle request"
+ StorageIDRetrievalFailure = "can't get storage ID from metabase"
+ ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
+ CandidateStatusPriority = "candidate status is different from the netmap status, the former takes priority"
+ TombstoneExpirationParseFailure = "tombstone getter: could not parse tombstone expiration epoch"
+ FrostFSNodeCantUpdateObjectStorageID = "can't update object storage ID"
+ FrostFSNodeCantFlushObjectToBlobstor = "can't flush an object to blobstor"
+ FrostFSNodeCantDecodeObjectAddressFromDB = "can't decode object address from the DB"
+ FrostFSNodeCantUnmarshalObjectFromDB = "can't unmarshal an object from the DB"
+ RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
+ RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
+ FailedToCountWritecacheItems = "failed to count writecache items"
+ AttemtToCloseAlreadyClosedBlobovnicza = "attempt to close an already closed blobovnicza"
)
diff --git a/internal/metrics/application.go b/internal/metrics/application.go
deleted file mode 100644
index 53acf9b7f..000000000
--- a/internal/metrics/application.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package metrics
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type ApplicationInfo struct {
- versionValue *prometheus.GaugeVec
-}
-
-func NewApplicationInfo(version string) *ApplicationInfo {
- appInfo := &ApplicationInfo{
- versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Name: "app_info",
- Help: "General information about the application.",
- }, []string{"version"}),
- }
- appInfo.versionValue.With(prometheus.Labels{"version": version})
- return appInfo
-}
diff --git a/internal/metrics/cache.go b/internal/metrics/cache.go
deleted file mode 100644
index 8181586e2..000000000
--- a/internal/metrics/cache.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package metrics
-
-import (
- "strconv"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var cacheRequests = metrics.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: namespace,
- Subsystem: commonCacheSubsystem,
- Name: "request_duration_seconds",
- Help: "Accumulated common cache request process duration",
-}, []string{hitLabel, methodLabel, cacheLabel})
-
-type CacheMetrics struct {
- cache string
-}
-
-// NewCacheMetrics returns new CacheMetrics instance for cache specified.
-func NewCacheMetrics(cache string) *CacheMetrics {
- return &CacheMetrics{
- cache: cache,
- }
-}
-
-func (m *CacheMetrics) AddMethodDuration(method string, d time.Duration, hit bool) {
- cacheRequests.With(prometheus.Labels{
- hitLabel: strconv.FormatBool(hit),
- methodLabel: method,
- cacheLabel: m.cache,
- }).Observe(d.Seconds())
-}
diff --git a/internal/metrics/engine.go b/internal/metrics/engine.go
deleted file mode 100644
index 1d01c95ed..000000000
--- a/internal/metrics/engine.go
+++ /dev/null
@@ -1,223 +0,0 @@
-package metrics
-
-import (
- "strconv"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type EngineMetrics interface {
- AddMethodDuration(method string, d time.Duration)
- AddToContainerSize(cnrID string, size int64)
- DeleteContainerSize(cnrID string)
- DeleteContainerCount(cnrID string)
- IncErrorCounter(shardID string)
- ClearErrorCounter(shardID string)
- DeleteShardMetrics(shardID string)
- AddToObjectCounter(shardID, objectType string, delta int)
- SetObjectCounter(shardID, objectType string, v uint64)
- AddToPayloadCounter(shardID string, size int64)
- SetMode(shardID string, mode mode.Mode)
- SetContainerObjectCounter(shardID, contID, objectType string, v uint64)
- IncContainerObjectCounter(shardID, contID, objectType string)
- SubContainerObjectCounter(shardID, contID, objectType string, v uint64)
- IncRefillObjectsCount(shardID, path string, size int, success bool)
- SetRefillPercent(shardID, path string, percent uint32)
- SetRefillStatus(shardID, path, status string)
- SetEvacuationInProgress(shardID string, value bool)
-
- WriteCache() WriteCacheMetrics
- GC() GCMetrics
-}
-
-type engineMetrics struct {
- methodDuration *prometheus.HistogramVec
- objectCounter *prometheus.GaugeVec
- containerSize *prometheus.GaugeVec
- payloadSize *prometheus.GaugeVec
- errorCounter *prometheus.GaugeVec
- mode *shardIDModeValue
- contObjCounter *prometheus.GaugeVec
-
- refillStatus *shardIDPathModeValue
- refillObjCounter *prometheus.GaugeVec
- refillPayloadCounter *prometheus.GaugeVec
- refillPercentCounter *prometheus.GaugeVec
- evacuationInProgress *shardIDModeValue
-
- gc *gcMetrics
- writeCache *writeCacheMetrics
-}
-
-func newEngineMetrics() *engineMetrics {
- return &engineMetrics{
- containerSize: newEngineGaugeVector("container_size_bytes", "Accumulated size of all objects in a container", []string{containerIDLabelKey}),
- payloadSize: newEngineGaugeVector("payload_size_bytes", "Accumulated size of all objects in a shard", []string{shardIDLabel}),
- errorCounter: newEngineGaugeVector("errors_total", "Shard's error counter", []string{shardIDLabel}),
- methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "request_duration_seconds",
- Help: "Duration of Engine requests",
- }, []string{methodLabel}),
- objectCounter: newEngineGaugeVector("objects_total",
- "Objects counters per shards. DEPRECATED: Will be deleted in next releasese, use frostfs_node_engine_container_objects_total metric.",
- []string{shardIDLabel, typeLabel}),
- gc: newGCMetrics(),
- writeCache: newWriteCacheMetrics(),
- mode: newShardIDMode(engineSubsystem, "mode_info", "Shard mode"),
- contObjCounter: newEngineGaugeVector("container_objects_total", "Count of objects for each container", []string{shardIDLabel, containerIDLabelKey, typeLabel}),
- refillStatus: newShardIDPathMode(engineSubsystem, "resync_metabase_status", "Resync from blobstore to metabase status"),
- refillObjCounter: newEngineGaugeVector("resync_metabase_objects_total", "Count of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}),
- refillPayloadCounter: newEngineGaugeVector("resync_metabase_objects_size_bytes", "Size of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}),
- refillPercentCounter: newEngineGaugeVector("resync_metabase_complete_percent", "Percent of resynced from blobstore to metabase completeness", []string{shardIDLabel, pathLabel}),
- evacuationInProgress: newShardIDMode(engineSubsystem, "evacuation_in_progress", "Shard evacuation in progress"),
- }
-}
-
-func newEngineGaugeVector(name, help string, labels []string) *prometheus.GaugeVec {
- return metrics.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: name,
- Help: help,
- }, labels)
-}
-
-func (m *engineMetrics) AddMethodDuration(method string, d time.Duration) {
- m.methodDuration.With(prometheus.Labels{
- methodLabel: method,
- }).Observe(d.Seconds())
-}
-
-func (m *engineMetrics) AddToContainerSize(cnrID string, size int64) {
- m.containerSize.With(prometheus.Labels{containerIDLabelKey: cnrID}).Add(float64(size))
-}
-
-func (m *engineMetrics) DeleteContainerSize(cnrID string) {
- m.containerSize.DeletePartialMatch(prometheus.Labels{containerIDLabelKey: cnrID})
-}
-
-func (m *engineMetrics) DeleteContainerCount(cnrID string) {
- m.contObjCounter.DeletePartialMatch(prometheus.Labels{containerIDLabelKey: cnrID})
-}
-
-func (m *engineMetrics) AddToPayloadCounter(shardID string, size int64) {
- m.payloadSize.With(prometheus.Labels{shardIDLabel: shardID}).Add(float64(size))
-}
-
-func (m *engineMetrics) IncErrorCounter(shardID string) {
- m.errorCounter.With(prometheus.Labels{shardIDLabel: shardID}).Inc()
-}
-
-func (m *engineMetrics) ClearErrorCounter(shardID string) {
- m.errorCounter.With(prometheus.Labels{shardIDLabel: shardID}).Set(0)
-}
-
-func (m *engineMetrics) DeleteShardMetrics(shardID string) {
- m.errorCounter.Delete(prometheus.Labels{shardIDLabel: shardID})
- m.payloadSize.Delete(prometheus.Labels{shardIDLabel: shardID})
- m.objectCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
- m.contObjCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
- m.refillObjCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
- m.refillPayloadCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
- m.refillPercentCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
- m.mode.Delete(shardID)
- m.refillStatus.DeleteByShardID(shardID)
- m.evacuationInProgress.Delete(shardID)
-}
-
-func (m *engineMetrics) AddToObjectCounter(shardID, objectType string, delta int) {
- m.objectCounter.With(
- prometheus.Labels{
- shardIDLabel: shardID,
- typeLabel: objectType,
- },
- ).Add(float64(delta))
-}
-
-func (m *engineMetrics) SetObjectCounter(shardID, objectType string, v uint64) {
- m.objectCounter.With(
- prometheus.Labels{
- shardIDLabel: shardID,
- typeLabel: objectType,
- },
- ).Set(float64(v))
-}
-
-func (m *engineMetrics) SetContainerObjectCounter(shardID, contID, objectType string, v uint64) {
- m.contObjCounter.With(
- prometheus.Labels{
- shardIDLabel: shardID,
- containerIDLabelKey: contID,
- typeLabel: objectType,
- },
- ).Set(float64(v))
-}
-
-func (m *engineMetrics) IncContainerObjectCounter(shardID, contID, objectType string) {
- m.contObjCounter.With(
- prometheus.Labels{
- shardIDLabel: shardID,
- containerIDLabelKey: contID,
- typeLabel: objectType,
- },
- ).Inc()
-}
-
-func (m *engineMetrics) SubContainerObjectCounter(shardID, contID, objectType string, v uint64) {
- m.contObjCounter.With(
- prometheus.Labels{
- shardIDLabel: shardID,
- containerIDLabelKey: contID,
- typeLabel: objectType,
- },
- ).Sub(float64(v))
-}
-
-func (m *engineMetrics) SetMode(shardID string, mode mode.Mode) {
- m.mode.SetMode(shardID, mode.String())
-}
-
-func (m *engineMetrics) WriteCache() WriteCacheMetrics {
- return m.writeCache
-}
-
-func (m *engineMetrics) GC() GCMetrics {
- return m.gc
-}
-
-func (m *engineMetrics) IncRefillObjectsCount(shardID, path string, size int, success bool) {
- m.refillObjCounter.With(
- prometheus.Labels{
- shardIDLabel: shardID,
- pathLabel: path,
- successLabel: strconv.FormatBool(success),
- },
- ).Inc()
- m.refillPayloadCounter.With(
- prometheus.Labels{
- shardIDLabel: shardID,
- pathLabel: path,
- successLabel: strconv.FormatBool(success),
- },
- ).Add(float64(size))
-}
-
-func (m *engineMetrics) SetRefillPercent(shardID, path string, percent uint32) {
- m.refillPercentCounter.With(prometheus.Labels{
- shardIDLabel: shardID,
- pathLabel: path,
- }).Set(float64(percent))
-}
-
-func (m *engineMetrics) SetRefillStatus(shardID, path, status string) {
- m.refillStatus.SetMode(shardID, path, status)
-}
-
-func (m *engineMetrics) SetEvacuationInProgress(shardID string, value bool) {
- m.evacuationInProgress.SetMode(shardID, strconv.FormatBool(value))
-}
diff --git a/internal/metrics/multinet.go b/internal/metrics/multinet.go
deleted file mode 100644
index 6b1f99d46..000000000
--- a/internal/metrics/multinet.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package metrics
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type multinetMetrics struct {
- dials *prometheus.GaugeVec
-}
-
-type MultinetMetrics interface {
- Dial(sourceIP string, success bool)
-}
-
-func newMultinetMetrics(ns string) *multinetMetrics {
- return &multinetMetrics{
- dials: metrics.NewGaugeVec(
- prometheus.GaugeOpts{
- Namespace: ns,
- Subsystem: multinetSubsystem,
- Name: "dial_count",
- Help: "Dials count performed by multinet",
- }, []string{sourceIPLabel, successLabel}),
- }
-}
-
-func (m *multinetMetrics) Dial(sourceIP string, success bool) {
- m.dials.With(prometheus.Labels{
- sourceIPLabel: sourceIP,
- successLabel: strconv.FormatBool(success),
- }).Inc()
-}
diff --git a/internal/metrics/policer.go b/internal/metrics/policer.go
deleted file mode 100644
index e4bdc944e..000000000
--- a/internal/metrics/policer.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package metrics
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type PolicerMetrics interface {
- IncProcessedObjects()
-}
-
-type policerMetrics struct {
- processedObjectsCounter prometheus.Counter
-}
-
-func newPolicerMetrics() *policerMetrics {
- return &policerMetrics{
- processedObjectsCounter: metrics.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: policerSubsystem,
- Name: "processed_objects_total",
- Help: "Total number of objects processed by policer",
- }),
- }
-}
-
-func (m *policerMetrics) IncProcessedObjects() {
- m.processedObjectsCounter.Inc()
-}
diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go
deleted file mode 100644
index be6878142..000000000
--- a/internal/metrics/qos.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package metrics
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type QoSMetrics struct {
- opsCounter *prometheus.GaugeVec
-}
-
-func newQoSMetrics() *QoSMetrics {
- return &QoSMetrics{
- opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: qosSubsystem,
- Name: "operations_total",
- Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
- }, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
- }
-}
-
-func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) {
- m.opsCounter.With(prometheus.Labels{
- shardIDLabel: shardID,
- operationLabel: operation,
- ioTagLabel: tag,
- typeLabel: "pending",
- }).Set(float64(pending))
- m.opsCounter.With(prometheus.Labels{
- shardIDLabel: shardID,
- operationLabel: operation,
- ioTagLabel: tag,
- typeLabel: "in_progress",
- }).Set(float64(inProgress))
- m.opsCounter.With(prometheus.Labels{
- shardIDLabel: shardID,
- operationLabel: operation,
- ioTagLabel: tag,
- typeLabel: "completed",
- }).Set(float64(completed))
- m.opsCounter.With(prometheus.Labels{
- shardIDLabel: shardID,
- operationLabel: operation,
- ioTagLabel: tag,
- typeLabel: "resource_exhausted",
- }).Set(float64(resourceExhausted))
-}
-
-func (m *QoSMetrics) Close(shardID string) {
- m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
-}
diff --git a/internal/net/config.go b/internal/net/config.go
deleted file mode 100644
index b84ac3b35..000000000
--- a/internal/net/config.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package net
-
-import (
- "errors"
- "fmt"
- "net/netip"
- "slices"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
- "git.frostfs.info/TrueCloudLab/multinet"
-)
-
-var errEmptySourceIPList = errors.New("empty source IP list")
-
-type Subnet struct {
- Prefix string
- SourceIPs []string
-}
-
-type Config struct {
- Enabled bool
- Subnets []Subnet
- Balancer string
- Restrict bool
- FallbackDelay time.Duration
- Metrics metrics.MultinetMetrics
-}
-
-func (c Config) toMultinetConfig() (multinet.Config, error) {
- var subnets []multinet.Subnet
- for _, s := range c.Subnets {
- var ms multinet.Subnet
- p, err := netip.ParsePrefix(s.Prefix)
- if err != nil {
- return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
- }
- ms.Prefix = p
- for _, ip := range s.SourceIPs {
- addr, err := netip.ParseAddr(ip)
- if err != nil {
- return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
- }
- ms.SourceIPs = append(ms.SourceIPs, addr)
- }
- if len(ms.SourceIPs) == 0 {
- return multinet.Config{}, errEmptySourceIPList
- }
- subnets = append(subnets, ms)
- }
- return multinet.Config{
- Subnets: subnets,
- Balancer: multinet.BalancerType(c.Balancer),
- Restrict: c.Restrict,
- FallbackDelay: c.FallbackDelay,
- Dialer: newDefaulDialer(),
- EventHandler: newEventHandler(c.Metrics),
- }, nil
-}
-
-func (c Config) equals(other Config) bool {
- return c.Enabled == other.Enabled &&
- slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
- return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
- }) &&
- c.Balancer == other.Balancer &&
- c.Restrict == other.Restrict &&
- c.FallbackDelay == other.FallbackDelay
-}
diff --git a/internal/net/dial_target.go b/internal/net/dial_target.go
deleted file mode 100644
index 6265f1860..000000000
--- a/internal/net/dial_target.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
-
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package net
-
-import (
- "net/url"
- "strings"
-)
-
-// parseDialTarget returns the network and address to pass to dialer.
-func parseDialTarget(target string) (string, string) {
- net := "tcp"
- m1 := strings.Index(target, ":")
- m2 := strings.Index(target, ":/")
- // handle unix:addr which will fail with url.Parse
- if m1 >= 0 && m2 < 0 {
- if n := target[0:m1]; n == "unix" {
- return n, target[m1+1:]
- }
- }
- if m2 >= 0 {
- t, err := url.Parse(target)
- if err != nil {
- return net, target
- }
- scheme := t.Scheme
- addr := t.Path
- if scheme == "unix" {
- if addr == "" {
- addr = t.Host
- }
- return scheme, addr
- }
- }
- return net, target
-}
diff --git a/internal/net/dialer.go b/internal/net/dialer.go
deleted file mode 100644
index daf0f815f..000000000
--- a/internal/net/dialer.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package net
-
-import (
- "context"
- "net"
- "syscall"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-type Dialer interface {
- DialContext(ctx context.Context, network, address string) (net.Conn, error)
-}
-
-func DialContextTCP(ctx context.Context, address string, d Dialer) (net.Conn, error) {
- return d.DialContext(ctx, "tcp", address)
-}
-
-func newDefaulDialer() net.Dialer {
- // From `grpc.WithContextDialer` comment:
- //
- // Note: All supported releases of Go (as of December 2023) override the OS
- // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
- // with OS defaults for keepalive time and interval, use a net.Dialer that sets
- // the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
- // option to true from the Control field. For a concrete example of how to do
- // this, see internal.NetDialerWithTCPKeepalive().
- //
- // https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
- return net.Dialer{
- KeepAlive: time.Duration(-1),
- Control: func(_, _ string, c syscall.RawConn) error {
- return c.Control(func(fd uintptr) {
- _ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
- })
- },
- }
-}
diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go
deleted file mode 100644
index 3d94dedc7..000000000
--- a/internal/net/dialer_source.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package net
-
-import (
- "context"
- "net"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/multinet"
-)
-
-type DialerSource struct {
- guard sync.RWMutex
-
- c Config
-
- md multinet.Dialer
-}
-
-func NewDialerSource(c Config) (*DialerSource, error) {
- result := &DialerSource{}
- if err := result.build(c); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (s *DialerSource) build(c Config) error {
- if c.Enabled {
- mc, err := c.toMultinetConfig()
- if err != nil {
- return err
- }
- md, err := multinet.NewDialer(mc)
- if err != nil {
- return err
- }
- s.md = md
- s.c = c
- return nil
- }
- s.md = nil
- s.c = c
- return nil
-}
-
-// GrpcContextDialer returns grpc.WithContextDialer func.
-// Returns nil if multinet disabled.
-func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
- s.guard.RLock()
- defer s.guard.RUnlock()
-
- if s.c.Enabled {
- return func(ctx context.Context, address string) (net.Conn, error) {
- network, address := parseDialTarget(address)
- return s.md.DialContext(ctx, network, address)
- }
- }
- return nil
-}
-
-// NetContextDialer returns net.DialContext dial function.
-// Returns nil if multinet disabled.
-func (s *DialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
- s.guard.RLock()
- defer s.guard.RUnlock()
-
- if s.c.Enabled {
- return func(ctx context.Context, network, address string) (net.Conn, error) {
- return s.md.DialContext(ctx, network, address)
- }
- }
- return nil
-}
-
-func (s *DialerSource) Update(c Config) error {
- s.guard.Lock()
- defer s.guard.Unlock()
-
- if s.c.equals(c) {
- return nil
- }
- return s.build(c)
-}
diff --git a/internal/net/event_handler.go b/internal/net/event_handler.go
deleted file mode 100644
index 024e5cf7c..000000000
--- a/internal/net/event_handler.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package net
-
-import (
- "net"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
- "git.frostfs.info/TrueCloudLab/multinet"
-)
-
-var _ multinet.EventHandler = (*metricsEventHandler)(nil)
-
-type metricsEventHandler struct {
- m metrics.MultinetMetrics
-}
-
-func (m *metricsEventHandler) DialPerformed(sourceIP net.Addr, _ string, _ string, err error) {
- sourceIPString := "undefined"
- if sourceIP != nil {
- sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
- }
- m.m.Dial(sourceIPString, err == nil)
-}
-
-func newEventHandler(m metrics.MultinetMetrics) multinet.EventHandler {
- if m == nil {
- return nil
- }
- return &metricsEventHandler{m: m}
-}
diff --git a/internal/qos/config.go b/internal/qos/config.go
deleted file mode 100644
index d90b403b5..000000000
--- a/internal/qos/config.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package qos
-
-import (
- "math"
- "time"
-)
-
-const (
- NoLimit int64 = math.MaxInt64
- DefaultIdleTimeout = 5 * time.Minute
-)
-
-type LimiterConfig struct {
- Read OpConfig
- Write OpConfig
-}
-
-type OpConfig struct {
- MaxWaitingOps int64
- MaxRunningOps int64
- IdleTimeout time.Duration
- Tags []IOTagConfig
-}
-
-type IOTagConfig struct {
- Tag string
- Weight *float64
- LimitOps *float64
- ReservedOps *float64
- Prohibited bool
-}
diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go
deleted file mode 100644
index 58cd9e52c..000000000
--- a/internal/qos/grpc.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package qos
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "google.golang.org/grpc"
-)
-
-func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor {
- return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
- ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String())
- return handler(ctx, req)
- }
-}
-
-func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor {
- return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
- rawTag, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- return invoker(ctx, method, req, reply, cc, opts...)
- }
- tag, err := FromRawString(rawTag)
- if err != nil {
- tag = IOTagClient
- }
- if tag.IsLocal() {
- tag = IOTagInternal
- }
- ctx = tagging.ContextWithIOTag(ctx, tag.String())
- return invoker(ctx, method, req, reply, cc, opts...)
- }
-}
-
-func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor {
- return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- rawTag, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- return streamer(ctx, desc, cc, method, opts...)
- }
- tag, err := FromRawString(rawTag)
- if err != nil {
- tag = IOTagClient
- }
- if tag.IsLocal() {
- tag = IOTagInternal
- }
- ctx = tagging.ContextWithIOTag(ctx, tag.String())
- return streamer(ctx, desc, cc, method, opts...)
- }
-}
-
-func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor {
- return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
- if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() {
- return handler(ctx, req)
- }
-
- release, ok := getLimiter().Acquire(info.FullMethod)
- if !ok {
- return nil, new(apistatus.ResourceExhausted)
- }
- defer release()
-
- return handler(ctx, req)
- }
-}
-
-//nolint:contextcheck (grpc.ServerStream manages the context itself)
-func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor {
- return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
- if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() {
- return handler(srv, ss)
- }
-
- release, ok := getLimiter().Acquire(info.FullMethod)
- if !ok {
- return new(apistatus.ResourceExhausted)
- }
- defer release()
-
- return handler(srv, ss)
- }
-}
diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go
deleted file mode 100644
index 7d0826754..000000000
--- a/internal/qos/grpc_test.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package qos_test
-
-import (
- "context"
- "errors"
- "fmt"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "github.com/stretchr/testify/require"
- "google.golang.org/grpc"
-)
-
-const (
- okKey = "ok"
-)
-
-var (
- errTest = errors.New("mock")
- errWrongTag = errors.New("wrong tag")
- errNoTag = errors.New("failed to get tag from context")
- errResExhausted *apistatus.ResourceExhausted
- tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync}
-)
-
-type mockGRPCServerStream struct {
- grpc.ServerStream
-
- ctx context.Context
-}
-
-func (m *mockGRPCServerStream) Context() context.Context {
- return m.ctx
-}
-
-type limiter struct {
- acquired bool
- released bool
-}
-
-func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) {
- l.acquired = true
- if key != okKey {
- return nil, false
- }
- return func() { l.released = true }, true
-}
-
-func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
- interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim })
- handler := func(ctx context.Context, req any) (any, error) {
- return nil, errTest
- }
- _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler)
- return err
-}
-
-func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
- interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim })
- handler := func(srv any, stream grpc.ServerStream) error {
- return errTest
- }
- err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{
- FullMethod: methodName,
- }, handler)
- return err
-}
-
-func Test_MaxActiveRPCLimiter(t *testing.T) {
- // UnaryServerInterceptor
- t.Run("unary fail", func(t *testing.T) {
- var lim limiter
-
- err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "")
- require.ErrorAs(t, err, &errResExhausted)
- require.True(t, lim.acquired)
- require.False(t, lim.released)
- })
- t.Run("unary pass critical", func(t *testing.T) {
- var lim limiter
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
-
- err := unaryMaxActiveRPCLimiter(ctx, &lim, "")
- require.ErrorIs(t, err, errTest)
- require.False(t, lim.acquired)
- require.False(t, lim.released)
- })
- t.Run("unary pass", func(t *testing.T) {
- var lim limiter
-
- err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey)
- require.ErrorIs(t, err, errTest)
- require.True(t, lim.acquired)
- require.True(t, lim.released)
- })
- // StreamServerInterceptor
- t.Run("stream fail", func(t *testing.T) {
- var lim limiter
-
- err := streamMaxActiveRPCLimiter(context.Background(), &lim, "")
- require.ErrorAs(t, err, &errResExhausted)
- require.True(t, lim.acquired)
- require.False(t, lim.released)
- })
- t.Run("stream pass critical", func(t *testing.T) {
- var lim limiter
- ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
-
- err := streamMaxActiveRPCLimiter(ctx, &lim, "")
- require.ErrorIs(t, err, errTest)
- require.False(t, lim.acquired)
- require.False(t, lim.released)
- })
- t.Run("stream pass", func(t *testing.T) {
- var lim limiter
-
- err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey)
- require.ErrorIs(t, err, errTest)
- require.True(t, lim.acquired)
- require.True(t, lim.released)
- })
-}
-
-func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) {
- interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor()
- called := false
- handler := func(ctx context.Context, req any) (any, error) {
- called = true
- if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() {
- return nil, nil
- }
- return nil, errWrongTag
- }
- _, err := interceptor(context.Background(), nil, nil, handler)
- require.NoError(t, err)
- require.True(t, called)
-}
-
-func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) {
- interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()
-
- // check context with no value
- called := false
- invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
- called = true
- if _, ok := tagging.IOTagFromContext(ctx); ok {
- return fmt.Errorf("%v: expected no IO tags", errWrongTag)
- }
- return nil
- }
- require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil))
- require.True(t, called)
-
- // check context for internal tag
- targetTag := qos.IOTagInternal.String()
- invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
- raw, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- return errNoTag
- }
- if raw != targetTag {
- return errWrongTag
- }
- return nil
- }
- for _, tag := range tags {
- ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
- require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
- }
-
- // check context for client tag
- ctx := tagging.ContextWithIOTag(context.Background(), "")
- targetTag = qos.IOTagClient.String()
- require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
-}
-
-func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) {
- interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor()
-
- // check context with no value
- called := false
- streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- called = true
- if _, ok := tagging.IOTagFromContext(ctx); ok {
- return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag)
- }
- return nil, nil
- }
- _, err := interceptor(context.Background(), nil, nil, "", streamer, nil)
- require.True(t, called)
- require.NoError(t, err)
-
- // check context for internal tag
- targetTag := qos.IOTagInternal.String()
- streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- raw, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- return nil, errNoTag
- }
- if raw != targetTag {
- return nil, errWrongTag
- }
- return nil, nil
- }
- for _, tag := range tags {
- ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
- _, err := interceptor(ctx, nil, nil, "", streamer, nil)
- require.NoError(t, err)
- }
-
- // check context for client tag
- ctx := tagging.ContextWithIOTag(context.Background(), "")
- targetTag = qos.IOTagClient.String()
- _, err = interceptor(ctx, nil, nil, "", streamer, nil)
- require.NoError(t, err)
-}
diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go
deleted file mode 100644
index 2d7de32fc..000000000
--- a/internal/qos/limiter.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package qos
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-const (
- defaultIdleTimeout time.Duration = 0
- defaultShare float64 = 1.0
- minusOne = ^uint64(0)
-
- defaultMetricsCollectTimeout = 5 * time.Second
-)
-
-type ReleaseFunc scheduling.ReleaseFunc
-
-type Limiter interface {
- ReadRequest(context.Context) (ReleaseFunc, error)
- WriteRequest(context.Context) (ReleaseFunc, error)
- SetParentID(string)
- SetMetrics(Metrics)
- Close()
-}
-
-type scheduler interface {
- RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error)
- Close()
-}
-
-func NewLimiter(c LimiterConfig) (Limiter, error) {
- if err := c.Validate(); err != nil {
- return nil, err
- }
- readScheduler, err := createScheduler(c.Read)
- if err != nil {
- return nil, fmt.Errorf("create read scheduler: %w", err)
- }
- writeScheduler, err := createScheduler(c.Write)
- if err != nil {
- return nil, fmt.Errorf("create write scheduler: %w", err)
- }
- l := &mClockLimiter{
- readScheduler: readScheduler,
- writeScheduler: writeScheduler,
- closeCh: make(chan struct{}),
- wg: &sync.WaitGroup{},
- readStats: createStats(),
- writeStats: createStats(),
- }
- l.shardID.Store(&shardID{})
- l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}})
- l.startMetricsCollect()
- return l, nil
-}
-
-func createScheduler(config OpConfig) (scheduler, error) {
- if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit {
- return newSemaphoreScheduler(config.MaxRunningOps), nil
- }
- return scheduling.NewMClock(
- uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps),
- converToSchedulingTags(config.Tags), config.IdleTimeout)
-}
-
-func converToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo {
- result := make(map[string]scheduling.TagInfo)
- for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
- result[tag.String()] = scheduling.TagInfo{
- Share: defaultShare,
- }
- }
- for _, l := range limits {
- v := result[l.Tag]
- if l.Weight != nil && *l.Weight != 0 {
- v.Share = *l.Weight
- }
- if l.LimitOps != nil && *l.LimitOps != 0 {
- v.LimitIOPS = l.LimitOps
- }
- if l.ReservedOps != nil && *l.ReservedOps != 0 {
- v.ReservedIOPS = l.ReservedOps
- }
- v.Prohibited = l.Prohibited
- result[l.Tag] = v
- }
- return result
-}
-
-var (
- _ Limiter = (*noopLimiter)(nil)
- releaseStub ReleaseFunc = func() {}
- noopLimiterInstance = &noopLimiter{}
-)
-
-func NewNoopLimiter() Limiter {
- return noopLimiterInstance
-}
-
-type noopLimiter struct{}
-
-func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
- return releaseStub, nil
-}
-
-func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
- return releaseStub, nil
-}
-
-func (n *noopLimiter) SetParentID(string) {}
-
-func (n *noopLimiter) Close() {}
-
-func (n *noopLimiter) SetMetrics(Metrics) {}
-
-var _ Limiter = (*mClockLimiter)(nil)
-
-type shardID struct {
- id string
-}
-
-type mClockLimiter struct {
- readScheduler scheduler
- writeScheduler scheduler
-
- readStats map[string]*stat
- writeStats map[string]*stat
-
- shardID atomic.Pointer[shardID]
- metrics atomic.Pointer[metricsHolder]
- closeCh chan struct{}
- wg *sync.WaitGroup
-}
-
-func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
- return requestArrival(ctx, n.readScheduler, n.readStats)
-}
-
-func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
- return requestArrival(ctx, n.writeScheduler, n.writeStats)
-}
-
-func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
- tag, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- tag = IOTagClient.String()
- }
- stat := getStat(tag, stats)
- stat.pending.Add(1)
- if tag == IOTagCritical.String() {
- stat.inProgress.Add(1)
- return func() {
- stat.completed.Add(1)
- }, nil
- }
- rel, err := s.RequestArrival(ctx, tag)
- stat.inProgress.Add(1)
- if err != nil {
- if isResourceExhaustedErr(err) {
- stat.resourceExhausted.Add(1)
- return nil, &apistatus.ResourceExhausted{}
- }
- stat.completed.Add(1)
- return nil, err
- }
- return func() {
- rel()
- stat.completed.Add(1)
- }, nil
-}
-
-func (n *mClockLimiter) Close() {
- n.readScheduler.Close()
- n.writeScheduler.Close()
- close(n.closeCh)
- n.wg.Wait()
- n.metrics.Load().metrics.Close(n.shardID.Load().id)
-}
-
-func (n *mClockLimiter) SetParentID(parentID string) {
- n.shardID.Store(&shardID{id: parentID})
-}
-
-func (n *mClockLimiter) SetMetrics(m Metrics) {
- n.metrics.Store(&metricsHolder{metrics: m})
-}
-
-func (n *mClockLimiter) startMetricsCollect() {
- n.wg.Add(1)
- go func() {
- defer n.wg.Done()
-
- ticker := time.NewTicker(defaultMetricsCollectTimeout)
- defer ticker.Stop()
- for {
- select {
- case <-n.closeCh:
- return
- case <-ticker.C:
- shardID := n.shardID.Load().id
- if shardID == "" {
- continue
- }
- metrics := n.metrics.Load().metrics
- exportMetrics(metrics, n.readStats, shardID, "read")
- exportMetrics(metrics, n.writeStats, shardID, "write")
- }
- }
- }()
-}
-
-func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) {
- var pending uint64
- var inProgress uint64
- var completed uint64
- var resExh uint64
- for tag, s := range stats {
- pending = s.pending.Load()
- inProgress = s.inProgress.Load()
- completed = s.completed.Load()
- resExh = s.resourceExhausted.Load()
- if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 {
- continue
- }
- metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
- }
-}
-
-func isResourceExhaustedErr(err error) bool {
- return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
- errors.Is(err, errSemaphoreLimitExceeded) ||
- errors.Is(err, scheduling.ErrTagRequestsProhibited)
-}
diff --git a/internal/qos/metrics.go b/internal/qos/metrics.go
deleted file mode 100644
index c00da51b7..000000000
--- a/internal/qos/metrics.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package qos
-
-import "sync/atomic"
-
-type Metrics interface {
- SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64)
- Close(shardID string)
-}
-
-var _ Metrics = (*noopMetrics)(nil)
-
-type noopMetrics struct{}
-
-func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) {
-}
-
-func (n *noopMetrics) Close(string) {}
-
-// stat presents limiter statistics cumulative counters.
-//
-// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`.
-type stat struct {
- completed atomic.Uint64
- pending atomic.Uint64
- resourceExhausted atomic.Uint64
- inProgress atomic.Uint64
-}
-
-type metricsHolder struct {
- metrics Metrics
-}
diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go
deleted file mode 100644
index 74e6928f3..000000000
--- a/internal/qos/semaphore.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package qos
-
-import (
- "context"
- "errors"
-
- qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
-)
-
-var (
- _ scheduler = (*semaphore)(nil)
- errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded")
-)
-
-type semaphore struct {
- s *qosSemaphore.Semaphore
-}
-
-func newSemaphoreScheduler(size int64) *semaphore {
- return &semaphore{
- s: qosSemaphore.NewSemaphore(size),
- }
-}
-
-func (s *semaphore) Close() {}
-
-func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
-
- if s.s.Acquire() {
- return s.s.Release, nil
- }
- return nil, errSemaphoreLimitExceeded
-}
diff --git a/internal/qos/stats.go b/internal/qos/stats.go
deleted file mode 100644
index 3ecfad9f9..000000000
--- a/internal/qos/stats.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package qos
-
-const unknownStatsTag = "unknown"
-
-var statTags = map[string]struct{}{
- IOTagBackground.String(): {},
- IOTagClient.String(): {},
- IOTagCritical.String(): {},
- IOTagInternal.String(): {},
- IOTagPolicer.String(): {},
- IOTagTreeSync.String(): {},
- IOTagWritecache.String(): {},
- unknownStatsTag: {},
-}
-
-func createStats() map[string]*stat {
- result := make(map[string]*stat)
- for tag := range statTags {
- result[tag] = &stat{}
- }
- return result
-}
-
-func getStat(tag string, stats map[string]*stat) *stat {
- if v, ok := stats[tag]; ok {
- return v
- }
- return stats[unknownStatsTag]
-}
diff --git a/internal/qos/tags.go b/internal/qos/tags.go
deleted file mode 100644
index e3f7cafd6..000000000
--- a/internal/qos/tags.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package qos
-
-import (
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
-)
-
-type IOTag string
-
-const (
- IOTagBackground IOTag = "background"
- IOTagClient IOTag = "client"
- IOTagCritical IOTag = "critical"
- IOTagInternal IOTag = "internal"
- IOTagPolicer IOTag = "policer"
- IOTagTreeSync IOTag = "treesync"
- IOTagWritecache IOTag = "writecache"
-
- ioTagUnknown IOTag = ""
-)
-
-func FromRawString(s string) (IOTag, error) {
- switch s {
- case string(IOTagBackground):
- return IOTagBackground, nil
- case string(IOTagClient):
- return IOTagClient, nil
- case string(IOTagCritical):
- return IOTagCritical, nil
- case string(IOTagInternal):
- return IOTagInternal, nil
- case string(IOTagPolicer):
- return IOTagPolicer, nil
- case string(IOTagTreeSync):
- return IOTagTreeSync, nil
- case string(IOTagWritecache):
- return IOTagWritecache, nil
- default:
- return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
- }
-}
-
-func (t IOTag) String() string {
- return string(t)
-}
-
-func IOTagFromContext(ctx context.Context) string {
- tag, ok := tagging.IOTagFromContext(ctx)
- if !ok {
- tag = "undefined"
- }
- return tag
-}
-
-func (t IOTag) IsLocal() bool {
- return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync
-}
diff --git a/internal/qos/validate.go b/internal/qos/validate.go
deleted file mode 100644
index 70f1f24e8..000000000
--- a/internal/qos/validate.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package qos
-
-import (
- "errors"
- "fmt"
- "math"
-)
-
-var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any")
-
-type tagConfig struct {
- Shares, Limit, Reserved *float64
-}
-
-func (c *LimiterConfig) Validate() error {
- if err := validateOpConfig(c.Read); err != nil {
- return fmt.Errorf("limits 'read' section validation error: %w", err)
- }
- if err := validateOpConfig(c.Write); err != nil {
- return fmt.Errorf("limits 'write' section validation error: %w", err)
- }
- return nil
-}
-
-func validateOpConfig(c OpConfig) error {
- if c.MaxRunningOps <= 0 {
- return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps)
- }
- if c.MaxWaitingOps <= 0 {
- return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps)
- }
- if c.IdleTimeout <= 0 {
- return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String())
- }
- if err := validateTags(c.Tags); err != nil {
- return fmt.Errorf("'tags' config section validation error: %w", err)
- }
- return nil
-}
-
-func validateTags(configTags []IOTagConfig) error {
- tags := map[IOTag]tagConfig{
- IOTagBackground: {},
- IOTagClient: {},
- IOTagInternal: {},
- IOTagPolicer: {},
- IOTagTreeSync: {},
- IOTagWritecache: {},
- }
- for _, t := range configTags {
- tag, err := FromRawString(t.Tag)
- if err != nil {
- return fmt.Errorf("invalid tag %s: %w", t.Tag, err)
- }
- if _, ok := tags[tag]; !ok {
- return fmt.Errorf("tag %s is not configurable", t.Tag)
- }
- tags[tag] = tagConfig{
- Shares: t.Weight,
- Limit: t.LimitOps,
- Reserved: t.ReservedOps,
- }
- }
- idx := 0
- var shares float64
- for t, v := range tags {
- if idx == 0 {
- idx++
- shares = float64Value(v.Shares)
- } else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) {
- return errWeightsMustBeSpecified
- }
- if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) {
- return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String())
- }
- if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) {
- return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String())
- }
- if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) {
- return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String())
- }
- }
- return nil
-}
-
-func float64Value(f *float64) float64 {
- if f == nil {
- return 0.0
- }
- return *f
-}
diff --git a/pkg/ape/chainbase/boltdb.go b/pkg/ape/chainbase/boltdb.go
deleted file mode 100644
index 005b3bd84..000000000
--- a/pkg/ape/chainbase/boltdb.go
+++ /dev/null
@@ -1,329 +0,0 @@
-package chainbase
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "path/filepath"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "go.etcd.io/bbolt"
-)
-
-type boltLocalOverrideStorage struct {
- *cfg
-
- db *bbolt.DB
-}
-
-var chainBucket = []byte{0}
-
-var (
- // ErrRootBucketNotFound signals the database has not been properly initialized.
- ErrRootBucketNotFound = logicerr.New("root bucket not found")
-
- ErrGlobalNamespaceBucketNotFound = logicerr.New("global namespace bucket not found")
-
- ErrTargetTypeBucketNotFound = logicerr.New("target type bucket not found")
-
- ErrTargetNameBucketNotFound = logicerr.New("target name bucket not found")
-
- ErrBucketNotContainsChainID = logicerr.New("chain id not found in bucket")
-
- errChainIDIsNotSet = errors.New("chain ID is not set")
-)
-
-// NewBoltLocalOverrideDatabase returns storage wrapper for storing access policy engine
-// local overrides.
-//
-// chain storage (chainBucket):
-// -> global namespace bucket (nBucket):
-// --> target bucket (tBucket)
-// ---> target name (resource) bucket (rBucket):
-//
-// | Key | Value |
-// x---------------------x-------------------x
-// | chain id (string) | serialized chain |
-// x---------------------x-------------------x
-//
-//nolint:godot
-func NewBoltLocalOverrideDatabase(opts ...Option) LocalOverrideDatabase {
- c := defaultCfg()
-
- for i := range opts {
- opts[i](c)
- }
-
- return &boltLocalOverrideStorage{
- cfg: c,
- }
-}
-
-func (cs *boltLocalOverrideStorage) Init() error {
- return cs.db.Update(func(tx *bbolt.Tx) error {
- _, err := tx.CreateBucketIfNotExists(chainBucket)
- return err
- })
-}
-
-func (cs *boltLocalOverrideStorage) Open(context.Context) error {
- err := util.MkdirAllX(filepath.Dir(cs.path), cs.perm)
- if err != nil {
- return fmt.Errorf("can't create dir %s for the chain DB: %w", cs.path, err)
- }
-
- opts := *bbolt.DefaultOptions
- opts.NoSync = cs.noSync
- opts.Timeout = 100 * time.Millisecond
-
- cs.db, err = bbolt.Open(cs.path, cs.perm, &opts)
- if err != nil {
- return fmt.Errorf("can't open the chain DB: %w", err)
- }
-
- cs.db.MaxBatchSize = cs.maxBatchSize
- cs.db.MaxBatchDelay = cs.maxBatchDelay
-
- return nil
-}
-
-func (cs *boltLocalOverrideStorage) Close() error {
- var err error
- if cs.db != nil {
- err = cs.db.Close()
- }
- return err
-}
-
-func getTypeBucket(tx *bbolt.Tx, name chain.Name, target policyengine.Target) (*bbolt.Bucket, error) {
- cbucket := tx.Bucket(chainBucket)
- if cbucket == nil {
- return nil, ErrRootBucketNotFound
- }
-
- nbucket := cbucket.Bucket([]byte(name))
- if nbucket == nil {
- return nil, fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrGlobalNamespaceBucketNotFound, name)
- }
- return nbucket.Bucket([]byte{byte(target.Type)}), nil
-}
-
-func normalizeTargetName(target *policyengine.Target) {
- if target.Type == policyengine.Namespace && target.Name == "" {
- target.Name = "root"
- }
-}
-
-func getTargetBucket(tx *bbolt.Tx, name chain.Name, target policyengine.Target) (*bbolt.Bucket, error) {
- typeBucket, err := getTypeBucket(tx, name, target)
- if err != nil {
- return nil, err
- }
- if typeBucket == nil {
- return nil, fmt.Errorf("%w: %w: %c", policyengine.ErrChainNotFound, ErrTargetTypeBucketNotFound, target.Type)
- }
-
- normalizeTargetName(&target)
- rbucket := typeBucket.Bucket([]byte(target.Name))
- if rbucket == nil {
- return nil, fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrTargetNameBucketNotFound, target.Name)
- }
- return rbucket, nil
-}
-
-func getTargetBucketCreateIfEmpty(tx *bbolt.Tx, name chain.Name, target policyengine.Target) (*bbolt.Bucket, error) {
- cbucket := tx.Bucket(chainBucket)
- if cbucket == nil {
- return nil, ErrRootBucketNotFound
- }
-
- nbucket := cbucket.Bucket([]byte(name))
- if nbucket == nil {
- var err error
- nbucket, err = cbucket.CreateBucket([]byte(name))
- if err != nil {
- return nil, fmt.Errorf("could not create a bucket for the global chain name %s: %w", name, err)
- }
- }
-
- typeBucket := nbucket.Bucket([]byte{byte(target.Type)})
- if typeBucket == nil {
- var err error
- typeBucket, err = nbucket.CreateBucket([]byte{byte(target.Type)})
- if err != nil {
- return nil, fmt.Errorf("could not create a bucket for the target type '%c': %w", target.Type, err)
- }
- }
-
- normalizeTargetName(&target)
- rbucket := typeBucket.Bucket([]byte(target.Name))
- if rbucket == nil {
- var err error
- rbucket, err = typeBucket.CreateBucket([]byte(target.Name))
- if err != nil {
- return nil, fmt.Errorf("could not create a bucket for the target name %s: %w", target.Name, err)
- }
- }
-
- return rbucket, nil
-}
-
-func (cs *boltLocalOverrideStorage) AddOverride(name chain.Name, target policyengine.Target, c *chain.Chain) (chain.ID, error) {
- if len(c.ID) == 0 {
- return chain.ID{}, errChainIDIsNotSet
- }
-
- serializedChain := c.Bytes()
-
- err := cs.db.Update(func(tx *bbolt.Tx) error {
- rbuck, err := getTargetBucketCreateIfEmpty(tx, name, target)
- if err != nil {
- return err
- }
- return rbuck.Put([]byte(c.ID), serializedChain)
- })
-
- return c.ID, err
-}
-
-func (cs *boltLocalOverrideStorage) GetOverride(name chain.Name, target policyengine.Target, chainID chain.ID) (*chain.Chain, error) {
- var serializedChain []byte
-
- if err := cs.db.View(func(tx *bbolt.Tx) error {
- rbuck, err := getTargetBucket(tx, name, target)
- if err != nil {
- return err
- }
- serializedChain = rbuck.Get([]byte(chainID))
- if serializedChain == nil {
- return fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrBucketNotContainsChainID, chainID)
- }
- serializedChain = bytes.Clone(serializedChain)
- return nil
- }); err != nil {
- return nil, err
- }
-
- c := &chain.Chain{}
- if err := c.DecodeBytes(serializedChain); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-func (cs *boltLocalOverrideStorage) RemoveOverride(name chain.Name, target policyengine.Target, chainID chain.ID) error {
- return cs.db.Update(func(tx *bbolt.Tx) error {
- rbuck, err := getTargetBucket(tx, name, target)
- if err != nil {
- return err
- }
- return rbuck.Delete([]byte(chainID))
- })
-}
-
-func (cs *boltLocalOverrideStorage) RemoveOverridesByTarget(name chain.Name, target policyengine.Target) error {
- return cs.db.Update(func(tx *bbolt.Tx) error {
- typeBucket, err := getTypeBucket(tx, name, target)
- if err != nil {
- return err
- }
- normalizeTargetName(&target)
- return typeBucket.DeleteBucket([]byte(target.Name))
- })
-}
-
-func (cs *boltLocalOverrideStorage) ListOverrides(name chain.Name, target policyengine.Target) ([]*chain.Chain, error) {
- var serializedChains [][]byte
- var serializedChain []byte
- if err := cs.db.View(func(tx *bbolt.Tx) error {
- rbuck, err := getTargetBucket(tx, name, target)
- if err != nil {
- return err
- }
- return rbuck.ForEach(func(_, v []byte) error {
- serializedChain = bytes.Clone(v)
- serializedChains = append(serializedChains, serializedChain)
- return nil
- })
- }); err != nil {
- if errors.Is(err, policyengine.ErrChainNotFound) {
- return []*chain.Chain{}, nil
- }
- return nil, err
- }
- chains := make([]*chain.Chain, 0, len(serializedChains))
- for _, serializedChain = range serializedChains {
- c := &chain.Chain{}
- if err := c.DecodeBytes(serializedChain); err != nil {
- return nil, err
- }
- chains = append(chains, c)
- }
- return chains, nil
-}
-
-func (cs *boltLocalOverrideStorage) DropAllOverrides(name chain.Name) error {
- return cs.db.Update(func(tx *bbolt.Tx) error {
- cbucket := tx.Bucket(chainBucket)
- if cbucket == nil {
- return ErrRootBucketNotFound
- }
-
- nbucket := cbucket.Bucket([]byte(name))
- if nbucket == nil {
- return fmt.Errorf("%w: %w: global namespace %s", policyengine.ErrChainNotFound, ErrGlobalNamespaceBucketNotFound, name)
- }
-
- return tx.DeleteBucket([]byte(name))
- })
-}
-
-func (cs *boltLocalOverrideStorage) ListOverrideDefinedTargets(name chain.Name) ([]policyengine.Target, error) {
- var targets []policyengine.Target
- if err := cs.db.View(func(tx *bbolt.Tx) error {
- var err error
- targets, err = getTargets(tx, name)
- if err != nil {
- return err
- }
- return nil
- }); err != nil {
- return nil, err
- }
- return targets, nil
-}
-
-func getTargets(tx *bbolt.Tx, name chain.Name) ([]policyengine.Target, error) {
- var targets []policyengine.Target
- cbucket := tx.Bucket(chainBucket)
- if cbucket == nil {
- return nil, ErrRootBucketNotFound
- }
-
- nbucket := cbucket.Bucket([]byte(name))
- if nbucket == nil {
- return nil, fmt.Errorf("%w: %w: %s", policyengine.ErrChainNotFound, ErrGlobalNamespaceBucketNotFound, name)
- }
-
- if err := nbucket.ForEachBucket(func(k []byte) error {
- ttype := policyengine.TargetType(k[0])
- if err := nbucket.Bucket(k).ForEachBucket(func(k []byte) error {
- targets = append(targets, policyengine.Target{
- Type: ttype,
- Name: string(bytes.Clone(k)),
- })
- return nil
- }); err != nil {
- return err
- }
- return nil
- }); err != nil {
- return nil, err
- }
- return targets, nil
-}
diff --git a/pkg/ape/chainbase/inmemory.go b/pkg/ape/chainbase/inmemory.go
deleted file mode 100644
index 27712d959..000000000
--- a/pkg/ape/chainbase/inmemory.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package chainbase
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
-)
-
-type inmemoryLocalOverrideStorage struct {
- engine.LocalOverrideStorage
-}
-
-func NewInmemoryLocalOverrideDatabase() LocalOverrideDatabase {
- return &inmemoryLocalOverrideStorage{
- LocalOverrideStorage: inmemory.NewInmemoryLocalStorage(),
- }
-}
-
-func (cs *inmemoryLocalOverrideStorage) Init() error {
- return nil
-}
-
-func (cs *inmemoryLocalOverrideStorage) Open(_ context.Context) error {
- return nil
-}
-
-func (cs *inmemoryLocalOverrideStorage) Close() error {
- return nil
-}
diff --git a/pkg/ape/chainbase/interface.go b/pkg/ape/chainbase/interface.go
deleted file mode 100644
index ee445f22c..000000000
--- a/pkg/ape/chainbase/interface.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package chainbase
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
-)
-
-// DatabaseCore interface provides methods to initialize and manage local override storage
-// as database.
-type DatabaseCore interface {
- Init() error
- Open(context.Context) error
- Close() error
-}
-
-// LocalOverrideDatabase interface provides methods to manage local override storage
-// as database and as the APE's local override storage.
-type LocalOverrideDatabase interface {
- DatabaseCore
- engine.LocalOverrideStorage
-}
diff --git a/pkg/ape/chainbase/option.go b/pkg/ape/chainbase/option.go
deleted file mode 100644
index 590b7a885..000000000
--- a/pkg/ape/chainbase/option.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package chainbase
-
-import (
- "io/fs"
- "os"
- "time"
-
- "go.etcd.io/bbolt"
-)
-
-type Option func(*cfg)
-
-type cfg struct {
- path string
- perm fs.FileMode
- noSync bool
- maxBatchDelay time.Duration
- maxBatchSize int
-}
-
-func defaultCfg() *cfg {
- return &cfg{
- perm: os.ModePerm,
- maxBatchDelay: bbolt.DefaultMaxBatchDelay,
- maxBatchSize: bbolt.DefaultMaxBatchSize,
- }
-}
-
-func WithPath(path string) Option {
- return func(c *cfg) {
- c.path = path
- }
-}
-
-func WithPerm(perm fs.FileMode) Option {
- return func(c *cfg) {
- c.perm = perm
- }
-}
-
-func WithNoSync(noSync bool) Option {
- return func(c *cfg) {
- c.noSync = noSync
- }
-}
-
-func WithMaxBatchDelay(maxBatchDelay time.Duration) Option {
- return func(c *cfg) {
- c.maxBatchDelay = maxBatchDelay
- }
-}
-
-func WithMaxBatchSize(maxBatchSize int) Option {
- return func(c *cfg) {
- c.maxBatchSize = maxBatchSize
- }
-}
diff --git a/pkg/ape/contract_storage/proxy.go b/pkg/ape/contract_storage/proxy.go
deleted file mode 100644
index 8cbb1cce9..000000000
--- a/pkg/ape/contract_storage/proxy.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package contractstorage
-
-import (
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- policy_morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy"
- "github.com/nspcc-dev/neo-go/pkg/core/transaction"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/notary"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/wallet"
-)
-
-type ProxyAdaptedContractStorage interface {
- AddMorphRuleChain(name chain.Name, target engine.Target, c *chain.Chain) (util.Uint256, uint32, error)
-
- RemoveMorphRuleChain(name chain.Name, target engine.Target, chainID chain.ID) (util.Uint256, uint32, error)
-
- ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error)
-}
-
-var _ ProxyAdaptedContractStorage = (engine.MorphRuleChainStorage)(nil)
-
-type RPCActorProvider interface {
- GetRPCActor() actor.RPCActor
-}
-
-// ProxyVerificationContractStorage uses decorated MorphRuleChainStorage with actor where cosigner is a proxy contract.
-type ProxyVerificationContractStorage struct {
- rpcActorProvider RPCActorProvider
-
- cosigners []actor.SignerAccount
-
- policyScriptHash util.Uint160
-}
-
-var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil)
-
-func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage {
- acc := wallet.NewAccountFromPrivateKey(key)
- return &ProxyVerificationContractStorage{
- rpcActorProvider: rpcActorProvider,
-
- cosigners: []actor.SignerAccount{
- {
- Signer: transaction.Signer{
- Account: proxyScriptHash,
- Scopes: transaction.CustomContracts,
- AllowedContracts: []util.Uint160{policyScriptHash},
- },
- Account: notary.FakeContractAccount(proxyScriptHash),
- },
- {
- Signer: transaction.Signer{
- Account: acc.Contract.ScriptHash(),
- Scopes: transaction.CalledByEntry,
- },
- Account: acc,
- },
- },
-
- policyScriptHash: policyScriptHash,
- }
-}
-
-// contractStorageActorAdapter adapats *actor.Actor to policy_morph.ContractStorageActor interface.
-type contractStorageActorAdapter struct {
- *actor.Actor
- rpcActor invoker.RPCInvoke
-}
-
-func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke {
- return n.rpcActor
-}
-
-func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) {
- rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
- act, err := actor.New(rpcActor, contractStorage.cosigners)
- if err != nil {
- return nil, err
- }
- return &contractStorageActorAdapter{
- Actor: act,
- rpcActor: rpcActor,
- }, nil
-}
-
-// AddMorphRuleChain add morph rule chain to Policy contract using both Proxy contract and storage account as consigners.
-func (contractStorage *ProxyVerificationContractStorage) AddMorphRuleChain(name chain.Name, target engine.Target, c *chain.Chain) (util.Uint256, uint32, error) {
- // contractStorageActor is reconstructed per each method invocation because RPCActor's (that is, basically, WSClient) connection may get invalidated, but
- // ProxyVerificationContractStorage does not manage reconnections.
- contractStorageActor, err := contractStorage.newContractStorageActor()
- if err != nil {
- return util.Uint256{}, 0, err
- }
- return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).AddMorphRuleChain(name, target, c)
-}
-
-// RemoveMorphRuleChain removes morph rule chain from Policy contract using both Proxy contract and storage account as consigners.
-func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(name chain.Name, target engine.Target, chainID chain.ID) (util.Uint256, uint32, error) {
- // contractStorageActor is reconstructed per each method invocation because RPCActor's (that is, basically, WSClient) connection may get invalidated, but
- // ProxyVerificationContractStorage does not manage reconnections.
- contractStorageActor, err := contractStorage.newContractStorageActor()
- if err != nil {
- return util.Uint256{}, 0, err
- }
- return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).RemoveMorphRuleChain(name, target, chainID)
-}
-
-// ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as consigners.
-func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) {
- rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
- inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor}
- return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target)
-}
-
-type invokerAdapter struct {
- *invoker.Invoker
- rpcInvoker invoker.RPCInvoke
-}
-
-func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
- return n.rpcInvoker
-}
diff --git a/pkg/ape/converter/converter.go b/pkg/ape/converter/converter.go
deleted file mode 100644
index 9032680af..000000000
--- a/pkg/ape/converter/converter.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package converter
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
-)
-
-func SchemaRoleFromACLRole(role acl.Role) (string, error) {
- switch role {
- case acl.RoleOwner:
- return nativeschema.PropertyValueContainerRoleOwner, nil
- case acl.RoleContainer:
- return nativeschema.PropertyValueContainerRoleContainer, nil
- case acl.RoleInnerRing:
- return nativeschema.PropertyValueContainerRoleIR, nil
- case acl.RoleOthers:
- return nativeschema.PropertyValueContainerRoleOthers, nil
- default:
- return "", fmt.Errorf("failed to convert %s", role.String())
- }
-}
-
-func SchemaMethodFromACLOperation(op acl.Op) (string, error) {
- switch op {
- case acl.OpObjectGet:
- return nativeschema.MethodGetObject, nil
- case acl.OpObjectHead:
- return nativeschema.MethodHeadObject, nil
- case acl.OpObjectPut:
- return nativeschema.MethodPutObject, nil
- case acl.OpObjectDelete:
- return nativeschema.MethodDeleteObject, nil
- case acl.OpObjectSearch:
- return nativeschema.MethodSearchObject, nil
- case acl.OpObjectRange:
- return nativeschema.MethodRangeObject, nil
- case acl.OpObjectHash:
- return nativeschema.MethodHashObject, nil
- default:
- return "", fmt.Errorf("operation cannot be converted: %d", op)
- }
-}
diff --git a/pkg/ape/request/frostfsid.go b/pkg/ape/request/frostfsid.go
deleted file mode 100644
index d32bd4a07..000000000
--- a/pkg/ape/request/frostfsid.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package request
-
-import (
- "context"
- "fmt"
- "strconv"
- "strings"
-
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-// FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID.
-func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) {
- reqProps := make(map[string]string)
- subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash())
- if err != nil {
- if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
- return nil, fmt.Errorf("get subject error: %w", err)
- }
- return reqProps, nil
- }
- for k, v := range subj.KV {
- propertyKey := fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, k)
- reqProps[propertyKey] = v
- }
-
- groups := make([]string, len(subj.Groups))
- for i, group := range subj.Groups {
- groups[i] = strconv.FormatInt(group.ID, 10)
- }
- reqProps[commonschema.PropertyKeyFrostFSIDGroupID] = apechain.FormCondSliceContainsValue(groups)
-
- return reqProps, nil
-}
-
-// Groups return the actor's group ids from frostfsid contract.
-func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) {
- subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash())
- if err != nil {
- if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
- return nil, fmt.Errorf("get subject error: %w", err)
- }
- return []string{}, nil
- }
- groups := make([]string, len(subj.Groups))
- for i, group := range subj.Groups {
- groups[i] = strconv.FormatInt(group.ID, 10)
- }
- return groups, nil
-}
diff --git a/pkg/ape/request/request.go b/pkg/ape/request/request.go
deleted file mode 100644
index de67dea23..000000000
--- a/pkg/ape/request/request.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package request
-
-import (
- aperesource "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource"
-)
-
-type Request struct {
- operation string
- resource Resource
- properties map[string]string
-}
-
-func NewRequest(operation string, resource Resource, properties map[string]string) Request {
- return Request{
- operation: operation,
- resource: resource,
- properties: properties,
- }
-}
-
-var _ aperesource.Request = Request{}
-
-func (r Request) Operation() string {
- return r.operation
-}
-
-func (r Request) Property(key string) string {
- return r.properties[key]
-}
-
-func (r Request) Resource() aperesource.Resource {
- return r.resource
-}
-
-type Resource struct {
- name string
- properties map[string]string
-}
-
-var _ aperesource.Resource = Resource{}
-
-func NewResource(name string, properties map[string]string) Resource {
- return Resource{
- name: name,
- properties: properties,
- }
-}
-
-func (r Resource) Name() string {
- return r.name
-}
-
-func (r Resource) Property(key string) string {
- return r.properties[key]
-}
diff --git a/pkg/ape/router/bearer_overrides.go b/pkg/ape/router/bearer_overrides.go
deleted file mode 100644
index 2bc8ad614..000000000
--- a/pkg/ape/router/bearer_overrides.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package router
-
-import (
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
-)
-
-func newTarget(ct ape.ChainTarget) (policyengine.Target, error) {
- var target policyengine.Target
- switch ct.TargetType {
- case ape.TargetTypeContainer:
- var cid cidSDK.ID
- err := cid.DecodeString(ct.Name)
- if err != nil {
- return target, fmt.Errorf("invalid cid format: %s", target.Name)
- }
- target.Type = policyengine.Container
- case ape.TargetTypeGroup:
- target.Type = policyengine.Group
- case ape.TargetTypeNamespace:
- target.Type = policyengine.Namespace
- case ape.TargetTypeUser:
- target.Type = policyengine.User
- default:
- return target, fmt.Errorf("unsupported target type: %v", ct.TargetType)
- }
- target.Name = ct.Name
- return target, nil
-}
-
-type morphReaderDecorator struct {
- policyengine.MorphRuleChainStorageReader
-
- bearerTokenTarget policyengine.Target
-
- bearerTokenChains []*chain.Chain
-}
-
-func newMorphReaderDecorator(r policyengine.MorphRuleChainStorageReader, override bearer.APEOverride) (*morphReaderDecorator, error) {
- if r == nil {
- return nil, errors.New("empty morph chain rule reader")
- }
- t, err := newTarget(override.Target)
- if err != nil {
- return nil, err
- }
-
- bearerTokenChains := make([]*chain.Chain, len(override.Chains))
- for i := range override.Chains {
- chain := new(chain.Chain)
- if err := chain.DecodeBytes(override.Chains[i].Raw); err != nil {
- return nil, fmt.Errorf("invalid ape chain: %w", err)
- }
- bearerTokenChains[i] = chain
- }
-
- return &morphReaderDecorator{
- MorphRuleChainStorageReader: r,
- bearerTokenTarget: t,
- bearerTokenChains: bearerTokenChains,
- }, nil
-}
-
-func (m *morphReaderDecorator) ListMorphRuleChains(name chain.Name, target policyengine.Target) ([]*chain.Chain, error) {
- if len(m.bearerTokenChains) > 0 && m.bearerTokenTarget.Type == target.Type {
- if m.bearerTokenTarget.Name != target.Name {
- return nil, fmt.Errorf("unexpected bearer token target: %s", m.bearerTokenTarget.Name)
- }
- return m.bearerTokenChains, nil
- }
- return m.MorphRuleChainStorageReader.ListMorphRuleChains(name, target)
-}
-
-// BearerChainFeedRouter creates a chain router emplacing bearer token rule chains.
-// Bearer token chains override only container target chains within Policy contract. This means the order of checking
-// is as follows:
-//
-// 1. Local overrides;
-// 2. Policy contract chains for a namespace target (as namespace chains have higher priority);
-// 3. Bearer token chains for a container target - if they're not defined, then it checks Policy contract chains;
-// 4. Checks for the remaining targets.
-func BearerChainFeedRouter(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader, override bearer.APEOverride) (policyengine.ChainRouter, error) {
- mr, err := newMorphReaderDecorator(morphChainStorage, override)
- if err != nil {
- return nil, fmt.Errorf("create morph reader with bearer override error: %w", err)
- }
- return policyengine.NewDefaultChainRouterWithLocalOverrides(mr, localOverrideStorage), nil
-}
diff --git a/pkg/ape/router/bearer_overrides_test.go b/pkg/ape/router/bearer_overrides_test.go
deleted file mode 100644
index 3c12ee6fa..000000000
--- a/pkg/ape/router/bearer_overrides_test.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package router_test
-
-import (
- "fmt"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router"
- apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
- bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
- resourcetest "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource/testutil"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/stretchr/testify/require"
-)
-
-const (
- container = "67ETTZzbzJC6WxdQhHHHsJNCttVMBqYrSoFaUFVDNfiX"
- rootNs = ""
-)
-
-var (
- allowBySourceIP = &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.Allow,
- Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: chain.Resources{Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container)}},
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindRequest,
- Key: "SourceIP",
- Value: "10.122.1.20",
- },
- },
- },
- },
- }
-
- denyBySourceIP = &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: chain.Resources{Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container)}},
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindRequest,
- Key: "SourceIP",
- Value: "10.122.1.20",
- },
- },
- },
- },
- }
-)
-
-func TestBearerChainFedRouter(t *testing.T) {
- t.Run("no bearer token overrides", func(t *testing.T) {
- inmem := inmemory.NewInMemoryLocalOverrides()
-
- inmem.LocalStorage().AddOverride(chain.Ingress, engine.ContainerTarget(container), denyBySourceIP)
- inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(container), allowBySourceIP)
-
- _, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bearerSDK.APEOverride{})
- require.Error(t, err)
- })
- t.Run("allow by container with deny by bearer overrides", func(t *testing.T) {
- inmem := inmemory.NewInMemoryLocalOverrides()
-
- inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(container), allowBySourceIP)
-
- bt := bearerSDK.APEOverride{
- Target: apeSDK.ChainTarget{
- TargetType: apeSDK.TargetTypeContainer,
- Name: container,
- },
- Chains: []apeSDK.Chain{{
- Raw: denyBySourceIP.Bytes(),
- }},
- }
-
- r, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bt)
- require.NoError(t, err)
-
- req := resourcetest.NewRequest(nativeschema.MethodPutObject,
- resourcetest.NewResource(fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container), map[string]string{}),
- map[string]string{
- "SourceIP": "10.122.1.20",
- "Actor": "someOwner",
- },
- )
-
- st, found, err := r.IsAllowed(chain.Ingress, engine.NewRequestTarget(rootNs, container), req)
- require.NoError(t, err)
- require.True(t, found)
- require.Equal(t, st, chain.AccessDenied)
- })
- t.Run("allow by namespace with deny by bearer overrides", func(t *testing.T) {
- inmem := inmemory.NewInMemoryLocalOverrides()
-
- inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(container), allowBySourceIP)
- inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(rootNs), allowBySourceIP)
-
- bt := bearerSDK.APEOverride{
- Target: apeSDK.ChainTarget{
- TargetType: apeSDK.TargetTypeContainer,
- Name: container,
- },
- Chains: []apeSDK.Chain{{
- Raw: denyBySourceIP.Bytes(),
- }},
- }
-
- r, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bt)
- require.NoError(t, err)
-
- req := resourcetest.NewRequest(nativeschema.MethodPutObject,
- resourcetest.NewResource(fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container), map[string]string{}),
- map[string]string{
- "SourceIP": "10.122.1.20",
- "Actor": "someOwner",
- },
- )
-
- st, found, err := r.IsAllowed(chain.Ingress, engine.NewRequestTarget(rootNs, container), req)
- require.NoError(t, err)
- require.True(t, found)
- require.Equal(t, st, chain.AccessDenied)
- })
- t.Run("deny by namespace with allow by bearer overrides", func(t *testing.T) {
- inmem := inmemory.NewInMemoryLocalOverrides()
-
- inmem.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(rootNs), denyBySourceIP)
-
- bt := bearerSDK.APEOverride{
- Target: apeSDK.ChainTarget{
- TargetType: apeSDK.TargetTypeContainer,
- Name: container,
- },
- Chains: []apeSDK.Chain{{
- Raw: allowBySourceIP.Bytes(),
- }},
- }
-
- r, err := router.BearerChainFeedRouter(inmem.LocalStorage(), inmem.MorphRuleChainStorage(), bt)
- require.NoError(t, err)
-
- req := resourcetest.NewRequest(nativeschema.MethodPutObject,
- resourcetest.NewResource(fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container), map[string]string{}),
- map[string]string{
- "SourceIP": "10.122.1.20",
- "Actor": "someOwner",
- },
- )
-
- st, found, err := r.IsAllowed(chain.Ingress, engine.NewRequestTarget(rootNs, container), req)
- require.NoError(t, err)
- require.True(t, found)
- require.Equal(t, st, chain.AccessDenied)
- })
-}
diff --git a/pkg/ape/router/single_pass.go b/pkg/ape/router/single_pass.go
deleted file mode 100644
index ec9244bae..000000000
--- a/pkg/ape/router/single_pass.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package router
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
-)
-
-// SingleUseRouterWithBearerTokenChains creates chain router with inmemory storage implementation and
-// fed with APE chains defined in Bearer token.
-func SingleUseRouterWithBearerTokenChains(overrides []bearer.APEOverride) (engine.ChainRouter, error) {
- storage := inmemory.NewInmemoryMorphRuleChainStorage()
- for _, override := range overrides {
- target, err := newTarget(override.Target)
- if err != nil {
- return nil, err
- }
- for i := range override.Chains {
- chain := new(apechain.Chain)
- if err := chain.DecodeBytes(override.Chains[i].Raw); err != nil {
- return nil, fmt.Errorf("invalid ape chain: %w", err)
- }
- _, _, _ = storage.AddMorphRuleChain(apechain.Ingress, target, chain)
- }
- }
- return engine.NewDefaultChainRouter(storage), nil
-}
diff --git a/pkg/core/client/client.go b/pkg/core/client/client.go
index 98bdf99e7..8c92901f2 100644
--- a/pkg/core/client/client.go
+++ b/pkg/core/client/client.go
@@ -3,14 +3,15 @@ package client
import (
"context"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)
// Client is an interface of FrostFS storage
// node's client.
type Client interface {
+ ContainerAnnounceUsedSpace(context.Context, client.PrmAnnounceSpace) (*client.ResAnnounceSpace, error)
ObjectPutInit(context.Context, client.PrmObjectPutInit) (client.ObjectWriter, error)
ObjectPutSingle(context.Context, client.PrmObjectPutSingle) (*client.ResObjectPutSingle, error)
ObjectDelete(context.Context, client.PrmObjectDelete) (*client.ResObjectDelete, error)
diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go
index 91ee5c6c3..80c8f49b7 100644
--- a/pkg/core/client/util.go
+++ b/pkg/core/client/util.go
@@ -3,7 +3,6 @@ package client
import (
"bytes"
"fmt"
- "iter"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -20,11 +19,10 @@ func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGro
// Args must not be nil.
func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface {
PublicKey() []byte
- Addresses() iter.Seq[string]
+ IterateAddresses(func(string) bool)
NumberOfAddresses() int
ExternalAddresses() []string
-},
-) error {
+}) error {
var a network.AddressGroup
err := a.FromIterator(info)
@@ -51,8 +49,7 @@ func NodeInfoFromNetmapElement(dst *NodeInfo, info interface {
PublicKey() []byte
Addresses() network.AddressGroup
ExternalAddresses() network.AddressGroup
-},
-) {
+}) {
nodeInfoFromKeyAddr(dst, info.PublicKey(), info.Addresses(), info.ExternalAddresses())
}
diff --git a/pkg/core/container/delete.go b/pkg/core/container/delete.go
index 8c14bdf5e..8e0aaebb9 100644
--- a/pkg/core/container/delete.go
+++ b/pkg/core/container/delete.go
@@ -1,7 +1,7 @@
package container
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)
diff --git a/pkg/core/container/ec.go b/pkg/core/container/ec.go
deleted file mode 100644
index 1acb87f2b..000000000
--- a/pkg/core/container/ec.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package container
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-)
-
-// IsECContainer returns True if container has erasure coding policy.
-func IsECContainer(cnr containerSDK.Container) bool {
- return policy.IsECPlacement(cnr.PlacementPolicy())
-}
diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go
deleted file mode 100644
index 1c52d93e7..000000000
--- a/pkg/core/container/info.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package container
-
-import (
- "context"
- "sync"
-
- utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-)
-
-type Info struct {
- Indexed bool
- Removed bool
-}
-
-type infoValue struct {
- info Info
- err error
-}
-
-type InfoProvider interface {
- Info(ctx context.Context, id cid.ID) (Info, error)
-}
-
-type infoProvider struct {
- mtx *sync.RWMutex
- cache map[cid.ID]infoValue
- kl *utilSync.KeyLocker[cid.ID]
-
- source Source
- sourceErr error
- sourceOnce *sync.Once
- sourceFactory func() (Source, error)
-}
-
-func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider {
- return &infoProvider{
- mtx: &sync.RWMutex{},
- cache: make(map[cid.ID]infoValue),
- sourceOnce: &sync.Once{},
- kl: utilSync.NewKeyLocker[cid.ID](),
- sourceFactory: sourceFactory,
- }
-}
-
-func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) {
- v, found := r.tryGetFromCache(id)
- if found {
- return v.info, v.err
- }
-
- return r.getFromSource(ctx, id)
-}
-
-func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) {
- r.mtx.RLock()
- defer r.mtx.RUnlock()
-
- value, found := r.cache[id]
- return value, found
-}
-
-func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) {
- r.kl.Lock(id)
- defer r.kl.Unlock(id)
-
- if v, ok := r.tryGetFromCache(id); ok {
- return v.info, v.err
- }
-
- r.sourceOnce.Do(func() {
- r.source, r.sourceErr = r.sourceFactory()
- })
- if r.sourceErr != nil {
- return Info{}, r.sourceErr
- }
-
- cnr, err := r.source.Get(ctx, id)
- var civ infoValue
- if err != nil {
- if client.IsErrContainerNotFound(err) {
- removed, err := WasRemoved(ctx, r.source, id)
- if err != nil {
- civ.err = err
- } else {
- civ.info.Removed = removed
- }
- } else {
- civ.err = err
- }
- } else {
- civ.info.Indexed = IsIndexedContainer(cnr.Value)
- }
- r.putToCache(id, civ)
- return civ.info, civ.err
-}
-
-func (r *infoProvider) putToCache(id cid.ID, ct infoValue) {
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- r.cache[id] = ct
-}
diff --git a/pkg/core/container/storage.go b/pkg/core/container/storage.go
index 4eb14e53c..0766ced31 100644
--- a/pkg/core/container/storage.go
+++ b/pkg/core/container/storage.go
@@ -1,14 +1,11 @@
package container
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
// Container groups information about the FrostFS container stored in the FrostFS network.
@@ -26,10 +23,10 @@ type Container struct {
// DelInfo contains info about removed container.
type DelInfo struct {
// Container owner.
- Owner user.ID
+ Owner []byte
// Epoch indicates when the container was removed.
- Epoch uint64
+ Epoch int
}
// Source is an interface that wraps
@@ -43,9 +40,9 @@ type Source interface {
//
// Implementations must not retain the container pointer and modify
// the container through it.
- Get(ctx context.Context, cid cid.ID) (*Container, error)
+ Get(cid.ID) (*Container, error)
- DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error)
+ DeletionInfo(cid.ID) (*DelInfo, error)
}
// EACL groups information about the FrostFS container's extended ACL stored in
@@ -60,3 +57,16 @@ type EACL struct {
// Session within which Value was set. Nil means session absence.
Session *session.Container
}
+
+// EACLSource is the interface that wraps
+// basic methods of extended ACL table source.
+type EACLSource interface {
+ // GetEACL reads the table from the source by identifier.
+ // It returns any error encountered.
+ //
+ // GetEACL must return exactly one non-nil value.
+ //
+ // Must return apistatus.ErrEACLNotFound if requested
+ // eACL table is not in source.
+ GetEACL(cid.ID) (*EACL, error)
+}
diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go
index 61c568052..98919284e 100644
--- a/pkg/core/container/util.go
+++ b/pkg/core/container/util.go
@@ -1,18 +1,16 @@
package container
import (
- "context"
"errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// WasRemoved checks whether the container ever existed or
// it just has not been created yet at the current epoch.
-func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) {
- _, err := s.DeletionInfo(ctx, cid)
+func WasRemoved(s Source, cid cid.ID) (bool, error) {
+ _, err := s.DeletionInfo(cid)
if err == nil {
return true, nil
}
@@ -22,14 +20,3 @@ func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) {
}
return false, err
}
-
-// IsIndexedContainer returns True if container attributes should be indexed.
-func IsIndexedContainer(cnr containerSDK.Container) bool {
- var isS3Container bool
- for key := range cnr.Attributes() {
- if key == ".s3-location-constraint" {
- isS3Container = true
- }
- }
- return !isS3Container
-}
diff --git a/pkg/core/frostfsid/subject_provider.go b/pkg/core/frostfsid/subject_provider.go
deleted file mode 100644
index e752043d3..000000000
--- a/pkg/core/frostfsid/subject_provider.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package frostfsid
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-const (
- SubjectNotFoundErrorMessage = "subject not found"
-)
-
-// SubjectProvider interface provides methods to get subject from FrostfsID contract.
-type SubjectProvider interface {
- GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error)
- GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error)
-}
diff --git a/pkg/core/netmap/keys.go b/pkg/core/netmap/keys.go
index 0c64bb798..29cb2dc94 100644
--- a/pkg/core/netmap/keys.go
+++ b/pkg/core/netmap/keys.go
@@ -2,6 +2,6 @@ package netmap
// AnnouncedKeys is an interface of utility for working with the announced public keys of the storage nodes.
type AnnouncedKeys interface {
- // IsLocalKey checks if the key was announced by a local node.
+ // Checks if the key was announced by a local node.
IsLocalKey(key []byte) bool
}
diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go
index e58e42634..17fccc620 100644
--- a/pkg/core/netmap/nodes.go
+++ b/pkg/core/netmap/nodes.go
@@ -1,10 +1,6 @@
package netmap
-import (
- "iter"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
+import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
// Node is a named type of netmap.NodeInfo which provides interface needed
// in the current repository. Node is expected to be used everywhere instead
@@ -18,17 +14,12 @@ func (x Node) PublicKey() []byte {
return (netmap.NodeInfo)(x).PublicKey()
}
-// Addresses returns an iterator over all announced network addresses.
-func (x Node) Addresses() iter.Seq[string] {
- return (netmap.NodeInfo)(x).NetworkEndpoints()
-}
-
// IterateAddresses iterates over all announced network addresses
// and passes them into f. Handler MUST NOT be nil.
-// Deprecated: use [Node.Addresses] instead.
func (x Node) IterateAddresses(f func(string) bool) {
- for s := range (netmap.NodeInfo)(x).NetworkEndpoints() {
- if f(s) {
+ (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
+ for _, addr := range (netmap.NodeInfo)(x).ExternalAddresses() {
+ if f(addr) {
return
}
}
diff --git a/pkg/core/netmap/storage.go b/pkg/core/netmap/storage.go
index 97313da84..7770c61c7 100644
--- a/pkg/core/netmap/storage.go
+++ b/pkg/core/netmap/storage.go
@@ -1,8 +1,6 @@
package netmap
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -18,7 +16,7 @@ type Source interface {
//
// Implementations must not retain the network map pointer and modify
// the network map through it.
- GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error)
+ GetNetMap(diff uint64) (*netmap.NetMap, error)
// GetNetMapByEpoch reads network map by the epoch number from the storage.
// It returns the pointer to the requested network map and any error encountered.
@@ -27,21 +25,21 @@ type Source interface {
//
// Implementations must not retain the network map pointer and modify
// the network map through it.
- GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error)
+ GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error)
// Epoch reads the current epoch from the storage.
// It returns thw number of the current epoch and any error encountered.
//
// Must return exactly one non-default value.
- Epoch(ctx context.Context) (uint64, error)
+ Epoch() (uint64, error)
}
// GetLatestNetworkMap requests and returns the latest network map from the storage.
-func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) {
- return src.GetNetMap(ctx, 0)
+func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) {
+ return src.GetNetMap(0)
}
// GetPreviousNetworkMap requests and returns previous from the latest network map from the storage.
-func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) {
- return src.GetNetMap(ctx, 1)
+func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) {
+ return src.GetNetMap(1)
}
diff --git a/pkg/core/object/address.go b/pkg/core/object/address.go
new file mode 100644
index 000000000..12e5c89ce
--- /dev/null
+++ b/pkg/core/object/address.go
@@ -0,0 +1,13 @@
+package object
+
+import (
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+// AddressWithType groups object address with its FrostFS
+// object type.
+type AddressWithType struct {
+ Address oid.Address
+ Type objectSDK.Type
+}
diff --git a/pkg/core/object/ec.go b/pkg/core/object/ec.go
deleted file mode 100644
index 549ff7cd3..000000000
--- a/pkg/core/object/ec.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package object
-
-import (
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-)
-
-// IsECSupported returns True if EC supported for object.
-//
-// EC supported only for regular, not linking objects.
-func IsECSupported(obj *objectSDK.Object) bool {
- return obj.Type() == objectSDK.TypeRegular &&
- len(obj.Children()) == 0
-}
diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go
index cf090eb37..e65767723 100644
--- a/pkg/core/object/fmt.go
+++ b/pkg/core/object/fmt.go
@@ -8,12 +8,13 @@ import (
"fmt"
"strconv"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -40,12 +41,30 @@ type cfg struct {
verifyTokenIssuer bool
}
+// DeleteHandler is an interface of delete queue processor.
+type DeleteHandler interface {
+ // DeleteObjects places objects to a removal queue.
+ //
+ // Returns apistatus.LockNonRegularObject if at least one object
+ // is locked.
+ DeleteObjects(oid.Address, ...oid.Address) error
+}
+
// LockSource is a source of lock relations between the objects.
type LockSource interface {
// IsLocked must clarify object's lock status.
IsLocked(ctx context.Context, address oid.Address) (bool, error)
}
+// Locker is an object lock storage interface.
+type Locker interface {
+ // Lock list of objects as locked by locker in the specified container.
+ //
+ // Returns apistatus.LockNonRegularObject if at least object in locked
+ // list is irregular (not type of REGULAR).
+ Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error
+}
+
var errNilObject = errors.New("object is nil")
var errNilID = errors.New("missing identifier")
@@ -105,22 +124,15 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
return fmt.Errorf("invalid attributes: %w", err)
}
- exp, err := expirationEpochAttribute(obj)
- if err != nil {
- if !errors.Is(err, errNoExpirationEpoch) {
- return fmt.Errorf("object did not pass expiration check: %w", err)
- }
- } else if !unprepared && exp < v.netState.CurrentEpoch() {
- if err := v.checkIfExpired(ctx, obj); err != nil {
- return fmt.Errorf("object did not pass expiration check: %w", err)
- }
- }
-
if !unprepared {
- if err := v.validateSignatureKey(ctx, obj); err != nil {
+ if err := v.validateSignatureKey(obj); err != nil {
return fmt.Errorf("(%T) could not validate signature key: %w", v, err)
}
+ if err := v.checkExpiration(ctx, obj); err != nil {
+ return fmt.Errorf("object did not pass expiration check: %w", err)
+ }
+
if err := objectSDK.CheckHeaderVerificationFields(obj); err != nil {
return fmt.Errorf("(%T) could not validate header fields: %w", v, err)
}
@@ -134,7 +146,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
return nil
}
-func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error {
+func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
sig := obj.Signature()
if sig == nil {
return errMissingSignature
@@ -153,31 +165,19 @@ func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectS
}
token := obj.SessionToken()
- ownerID := obj.OwnerID()
-
- if token == nil && obj.ECHeader() != nil {
- role, err := v.isIROrContainerNode(ctx, obj, binKey)
- if err != nil {
- return err
- }
- if role == acl.RoleContainer {
- // EC part could be restored or created by container node, so ownerID could not match object signature
- return nil
- }
- return v.checkOwnerKey(ownerID, key)
- }
+ ownerID := *obj.OwnerID()
if token == nil || !token.AssertAuthKey(&key) {
return v.checkOwnerKey(ownerID, key)
}
if v.verifyTokenIssuer {
- role, err := v.isIROrContainerNode(ctx, obj, binKey)
+ signerIsIROrContainerNode, err := v.isIROrContainerNode(obj, binKey)
if err != nil {
return err
}
- if role == acl.RoleContainer || role == acl.RoleInnerRing {
+ if signerIsIROrContainerNode {
return nil
}
@@ -190,25 +190,25 @@ func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectS
return nil
}
-func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
+func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (bool, error) {
cnrID, containerIDSet := obj.ContainerID()
if !containerIDSet {
- return acl.RoleOthers, errNilCID
+ return false, errNilCID
}
cnrIDBin := make([]byte, sha256.Size)
cnrID.Encode(cnrIDBin)
- cnr, err := v.containers.Get(ctx, cnrID)
+ cnr, err := v.containers.Get(cnrID)
if err != nil {
- return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
+ return false, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
}
- res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value)
+ res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value)
if err != nil {
- return acl.RoleOthers, err
+ return false, err
}
- return res.Role, nil
+ return res.Role == acl.RoleContainer || res.Role == acl.RoleInnerRing, nil
}
func (v *FormatValidator) checkOwnerKey(id user.ID, key frostfsecdsa.PublicKey) error {
@@ -336,24 +336,35 @@ func (v *FormatValidator) fillAndValidateTombstoneMeta(o *objectSDK.Object, meta
var errExpired = errors.New("object has expired")
-func (v *FormatValidator) checkIfExpired(ctx context.Context, obj *objectSDK.Object) error {
- // an object could be expired but locked;
- // put such an object is a correct operation
-
- cID, _ := obj.ContainerID()
- oID, _ := obj.ID()
-
- var addr oid.Address
- addr.SetContainer(cID)
- addr.SetObject(oID)
-
- locked, err := v.e.IsLocked(ctx, addr)
+func (v *FormatValidator) checkExpiration(ctx context.Context, obj *objectSDK.Object) error {
+ exp, err := expirationEpochAttribute(obj)
if err != nil {
- return fmt.Errorf("locking status check for an expired object: %w", err)
+ if errors.Is(err, errNoExpirationEpoch) {
+ return nil // objects without expiration attribute are valid
+ }
+
+ return err
}
- if !locked {
- return errExpired
+ if exp < v.netState.CurrentEpoch() {
+ // an object could be expired but locked;
+ // put such an object is a correct operation
+
+ cID, _ := obj.ContainerID()
+ oID, _ := obj.ID()
+
+ var addr oid.Address
+ addr.SetContainer(cID)
+ addr.SetObject(oID)
+
+ locked, err := v.e.IsLocked(ctx, addr)
+ if err != nil {
+ return fmt.Errorf("locking status check for an expired object: %w", err)
+ }
+
+ if !locked {
+ return errExpired
+ }
}
return nil
@@ -361,7 +372,7 @@ func (v *FormatValidator) checkIfExpired(ctx context.Context, obj *objectSDK.Obj
func expirationEpochAttribute(obj *objectSDK.Object) (uint64, error) {
for _, a := range obj.Attributes() {
- if a.Key() != objectV2.SysAttributeExpEpoch {
+ if a.Key() != objectV2.SysAttributeExpEpoch && a.Key() != objectV2.SysAttributeExpEpochNeoFS {
continue
}
@@ -401,7 +412,7 @@ func (v *FormatValidator) checkAttributes(obj *objectSDK.Object) error {
var errIncorrectOwner = errors.New("incorrect object owner")
func (v *FormatValidator) checkOwner(obj *objectSDK.Object) error {
- if idOwner := obj.OwnerID(); idOwner.IsEmpty() {
+ if idOwner := obj.OwnerID(); idOwner == nil || len(idOwner.WalletBytes()) == 0 {
return errIncorrectOwner
}
diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go
index dc336eb34..2a5b5690c 100644
--- a/pkg/core/object/fmt_test.go
+++ b/pkg/core/object/fmt_test.go
@@ -7,10 +7,9 @@ import (
"strconv"
"testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
@@ -33,7 +32,7 @@ func blankValidObject(key *ecdsa.PrivateKey) *objectSDK.Object {
obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
- obj.SetOwnerID(idOwner)
+ obj.SetOwnerID(&idOwner)
return obj
}
@@ -66,7 +65,7 @@ func TestFormatValidator_Validate(t *testing.T) {
epoch: curEpoch,
}),
WithLockSource(ls),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
ownerKey, err := keys.NewPrivateKey()
@@ -108,7 +107,7 @@ func TestFormatValidator_Validate(t *testing.T) {
obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
obj.SetSessionToken(tok)
- obj.SetOwnerID(idOwner)
+ obj.SetOwnerID(&idOwner)
require.NoError(t, objectSDK.SetIDWithSignature(ownerKey.PrivateKey, obj))
@@ -196,8 +195,6 @@ func TestFormatValidator_Validate(t *testing.T) {
val := "text"
err := v.Validate(context.Background(), fn(val), false)
require.Error(t, err)
- err = v.Validate(context.Background(), fn(val), true)
- require.Error(t, err)
})
t.Run("expired object", func(t *testing.T) {
@@ -291,7 +288,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(false),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
tok := sessiontest.Object()
@@ -306,7 +303,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
obj.SetSessionToken(tok)
- obj.SetOwnerID(owner)
+ obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
require.NoError(t, v.Validate(context.Background(), obj, false))
@@ -340,7 +337,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
},
),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
tok := sessiontest.Object()
@@ -355,7 +352,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
- obj.SetOwnerID(owner)
+ obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
require.NoError(t, v.Validate(context.Background(), obj, false))
@@ -389,7 +386,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
- obj.SetOwnerID(owner)
+ obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
v := NewFormatValidator(
@@ -411,14 +408,14 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
- &utilTesting.TestNetmapSource{
- Netmaps: map[uint64]*netmap.NetMap{
+ &testNetmapSource{
+ netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
},
- CurrentEpoch: curEpoch,
+ currentEpoch: curEpoch,
},
),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
@@ -462,7 +459,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
- obj.SetOwnerID(owner)
+ obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
v := NewFormatValidator(
@@ -484,15 +481,15 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
- &utilTesting.TestNetmapSource{
- Netmaps: map[uint64]*netmap.NetMap{
+ &testNetmapSource{
+ netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
- CurrentEpoch: curEpoch,
+ currentEpoch: curEpoch,
},
),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
@@ -538,7 +535,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
- obj.SetOwnerID(owner)
+ obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
v := NewFormatValidator(
@@ -560,15 +557,15 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
- &utilTesting.TestNetmapSource{
- Netmaps: map[uint64]*netmap.NetMap{
+ &testNetmapSource{
+ netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
- CurrentEpoch: curEpoch,
+ currentEpoch: curEpoch,
},
),
- WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
+ WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.Error(t, v.Validate(context.Background(), obj, false))
@@ -579,7 +576,7 @@ type testIRSource struct {
irNodes [][]byte
}
-func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) {
+func (s *testIRSource) InnerRingKeys() ([][]byte, error) {
return s.irNodes, nil
}
@@ -587,13 +584,36 @@ type testContainerSource struct {
containers map[cid.ID]*container.Container
}
-func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
+func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
if cnr, found := s.containers[cnrID]; found {
return cnr, nil
}
return nil, fmt.Errorf("container not found")
}
-func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
+func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
return nil, nil
}
+
+type testNetmapSource struct {
+ netmaps map[uint64]*netmap.NetMap
+ currentEpoch uint64
+}
+
+func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) {
+ if diff >= s.currentEpoch {
+ return nil, fmt.Errorf("invalid diff")
+ }
+ return s.GetNetMapByEpoch(s.currentEpoch - diff)
+}
+
+func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
+ if nm, found := s.netmaps[epoch]; found {
+ return nm, nil
+ }
+ return nil, fmt.Errorf("netmap not found")
+}
+
+func (s *testNetmapSource) Epoch() (uint64, error) {
+ return s.currentEpoch, nil
+}
diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go
deleted file mode 100644
index aab12ebf9..000000000
--- a/pkg/core/object/info.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package object
-
-import (
- "fmt"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type ECInfo struct {
- ParentID oid.ID
- Index uint32
- Total uint32
-}
-
-func (v *ECInfo) String() string {
- if v == nil {
- return ""
- }
- return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total)
-}
-
-// Info groups object address with its FrostFS
-// object info.
-type Info struct {
- Address oid.Address
- Type objectSDK.Type
- IsLinkingObject bool
- ECInfo *ECInfo
-}
-
-func (v Info) String() string {
- return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo)
-}
diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go
index 3733ed507..79bf12ce3 100644
--- a/pkg/core/object/sender_classifier.go
+++ b/pkg/core/object/sender_classifier.go
@@ -2,7 +2,6 @@ package object
import (
"bytes"
- "context"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -18,7 +17,7 @@ import (
)
type InnerRing interface {
- InnerRingKeys(ctx context.Context) ([][]byte, error)
+ InnerRingKeys() ([][]byte, error)
}
type SenderClassifier struct {
@@ -41,12 +40,10 @@ type ClassifyResult struct {
}
func (c SenderClassifier) Classify(
- ctx context.Context,
ownerID *user.ID,
ownerKey *keys.PublicKey,
idCnr cid.ID,
- cnr container.Container,
-) (res *ClassifyResult, err error) {
+ cnr container.Container) (res *ClassifyResult, err error) {
ownerKeyInBytes := ownerKey.Bytes()
// TODO: #767 get owner from frostfs.id if present
@@ -59,15 +56,15 @@ func (c SenderClassifier) Classify(
}, nil
}
- return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr)
+ return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr)
}
-func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
- isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes)
+func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
+ isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
if err != nil {
// do not throw error, try best case matching
- c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
- zap.Error(err))
+ c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing,
+ zap.String("error", err.Error()))
} else if isInnerRingNode {
return &ClassifyResult{
Role: acl.RoleInnerRing,
@@ -78,13 +75,13 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
binCnr := make([]byte, sha256.Size)
idCnr.Encode(binCnr)
- isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr)
+ isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr)
if err != nil {
// error might happen if request has `RoleOther` key and placement
// is not possible for previous epoch, so
// do not throw error, try best case matching
- c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
- zap.Error(err))
+ c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode,
+ zap.String("error", err.Error()))
} else if isContainerNode {
return &ClassifyResult{
Role: acl.RoleContainer,
@@ -99,8 +96,8 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
}, nil
}
-func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) {
- innerRingKeys, err := c.innerRing.InnerRingKeys(ctx)
+func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) {
+ innerRingKeys, err := c.innerRing.InnerRingKeys()
if err != nil {
return false, err
}
@@ -116,16 +113,14 @@ func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (boo
}
func (c SenderClassifier) isContainerKey(
- ctx context.Context,
owner, idCnr []byte,
- cnr container.Container,
-) (bool, error) {
- nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check current netmap
+ cnr container.Container) (bool, error) {
+ nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap
if err != nil {
return false, err
}
- in, err := LookupKeyInContainer(nm, owner, idCnr, cnr)
+ in, err := lookupKeyInContainer(nm, owner, idCnr, cnr)
if err != nil {
return false, err
} else if in {
@@ -134,19 +129,18 @@ func (c SenderClassifier) isContainerKey(
// then check previous netmap, this can happen in-between epoch change
// when node migrates data from last epoch container
- nm, err = core.GetPreviousNetworkMap(ctx, c.netmap)
+ nm, err = core.GetPreviousNetworkMap(c.netmap)
if err != nil {
return false, err
}
- return LookupKeyInContainer(nm, owner, idCnr, cnr)
+ return lookupKeyInContainer(nm, owner, idCnr, cnr)
}
-func LookupKeyInContainer(
+func lookupKeyInContainer(
nm *netmap.NetMap,
- pkey, idCnr []byte,
- cnr container.Container,
-) (bool, error) {
+ owner, idCnr []byte,
+ cnr container.Container) (bool, error) {
cnrVectors, err := nm.ContainerNodes(cnr.PlacementPolicy(), idCnr)
if err != nil {
return false, err
@@ -154,7 +148,7 @@ func LookupKeyInContainer(
for i := range cnrVectors {
for j := range cnrVectors[i] {
- if bytes.Equal(cnrVectors[i][j].PublicKey(), pkey) {
+ if bytes.Equal(cnrVectors[i][j].PublicKey(), owner) {
return true, nil
}
}
diff --git a/pkg/core/policy/ec.go b/pkg/core/policy/ec.go
deleted file mode 100644
index 846af775a..000000000
--- a/pkg/core/policy/ec.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package policy
-
-import (
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-// IsECPlacement returns True if policy is erasure coding policy.
-func IsECPlacement(policy netmapSDK.PlacementPolicy) bool {
- return policy.NumberOfReplicas() == 1 && policy.ReplicaDescriptor(0).GetECDataCount() > 0
-}
-
-// ECDataCount returns EC data count for EC placement policy.
-func ECDataCount(policy netmapSDK.PlacementPolicy) int {
- return int(policy.ReplicaDescriptor(0).GetECDataCount())
-}
-
-// ECParityCount returns EC parity count for EC placement policy.
-func ECParityCount(policy netmapSDK.PlacementPolicy) int {
- return int(policy.ReplicaDescriptor(0).GetECParityCount())
-}
diff --git a/pkg/innerring/bindings.go b/pkg/innerring/bindings.go
index dfada764a..c4de07a5f 100644
--- a/pkg/innerring/bindings.go
+++ b/pkg/innerring/bindings.go
@@ -8,6 +8,7 @@ type (
// ContractProcessor interface defines functions for binding event producers
// such as event.Listener and Timers with contract processor.
ContractProcessor interface {
+ ListenerNotificationParsers() []event.NotificationParserInfo
ListenerNotificationHandlers() []event.NotificationHandlerInfo
ListenerNotaryParsers() []event.NotaryParserInfo
ListenerNotaryHandlers() []event.NotaryHandlerInfo
@@ -15,6 +16,11 @@ type (
)
func connectListenerWithProcessor(l event.Listener, p ContractProcessor) {
+ // register notification parsers
+ for _, parser := range p.ListenerNotificationParsers() {
+ l.SetNotificationParser(parser)
+ }
+
// register notification handlers
for _, handler := range p.ListenerNotificationHandlers() {
l.RegisterNotificationHandler(handler)
diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go
index 3f9d8df5f..ad69f207b 100644
--- a/pkg/innerring/blocktimer.go
+++ b/pkg/innerring/blocktimer.go
@@ -3,10 +3,14 @@ package innerring
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util"
+ "go.uber.org/zap"
)
type (
@@ -15,12 +19,28 @@ type (
EpochDuration() uint64
}
+ alphaState interface {
+ IsAlphabet() bool
+ }
+
newEpochHandler func()
+ containerEstimationStopper interface {
+ StopEstimation(p container.StopEstimationPrm) error
+ }
+
epochTimerArgs struct {
+ l *logger.Logger
+
+ alphabetState alphaState
+
newEpochHandlers []newEpochHandler
- epoch epochState // to specify which epoch to stop, and epoch duration
+ cnrWrapper containerEstimationStopper // to invoke stop container estimation
+ epoch epochState // to specify which epoch to stop, and epoch duration
+
+ stopEstimationDMul uint32 // X: X/Y of epoch in blocks
+ stopEstimationDDiv uint32 // Y: X/Y of epoch in blocks
}
emitTimerArgs struct {
@@ -29,7 +49,7 @@ type (
emitDuration uint32 // in blocks
}
- depositor func(context.Context) (util.Uint256, error)
+ depositor func() (util.Uint256, error)
awaiter func(context.Context, util.Uint256) error
)
@@ -54,7 +74,7 @@ func (s *Server) tickTimers(h uint32) {
}
func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
- return timer.NewBlockTimer(
+ epochTimer := timer.NewBlockTimer(
func() (uint32, error) {
return uint32(args.epoch.EpochDuration()), nil
},
@@ -64,13 +84,42 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
}
},
)
+
+ // sub-timer for epoch timer to tick stop container estimation events at
+ // some block in epoch
+ epochTimer.OnDelta(
+ args.stopEstimationDMul,
+ args.stopEstimationDDiv,
+ func() {
+ if !args.alphabetState.IsAlphabet() {
+ args.l.Debug(logs.InnerringNonalphabetModeDoNotStopContainerEstimations)
+ return
+ }
+
+ epochN := args.epoch.EpochCounter()
+ if epochN == 0 { // estimates are invalid in genesis epoch
+ return
+ }
+
+ prm := container.StopEstimationPrm{}
+ prm.SetEpoch(epochN - 1)
+
+ err := args.cnrWrapper.StopEstimation(prm)
+ if err != nil {
+ args.l.Warn(logs.InnerringCantStopEpochEstimation,
+ zap.Uint64("epoch", epochN),
+ zap.String("error", err.Error()))
+ }
+ })
+
+ return epochTimer
}
-func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer {
+func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer {
return timer.NewBlockTimer(
timer.StaticBlockMeter(args.emitDuration),
func() {
- args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{})
+ args.ap.HandleGasEmission(timerEvent.NewAlphabetEmitTick{})
},
)
}
diff --git a/pkg/innerring/blocktimer_test.go b/pkg/innerring/blocktimer_test.go
index 4cbe7e394..224aa5c58 100644
--- a/pkg/innerring/blocktimer_test.go
+++ b/pkg/innerring/blocktimer_test.go
@@ -3,20 +3,29 @@ package innerring
import (
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/stretchr/testify/require"
)
func TestEpochTimer(t *testing.T) {
t.Parallel()
+ alphaState := &testAlphabetState{isAlphabet: true}
neh := &testNewEpochHandler{}
+ cnrStopper := &testContainerEstStopper{}
epochState := &testEpochState{
counter: 99,
duration: 10,
}
args := &epochTimerArgs{
- newEpochHandlers: []newEpochHandler{neh.Handle},
- epoch: epochState,
+ l: test.NewLogger(t, true),
+ alphabetState: alphaState,
+ newEpochHandlers: []newEpochHandler{neh.Handle},
+ cnrWrapper: cnrStopper,
+ epoch: epochState,
+ stopEstimationDMul: 2,
+ stopEstimationDDiv: 10,
}
et := newEpochTimer(args)
err := et.Reset()
@@ -24,43 +33,63 @@ func TestEpochTimer(t *testing.T) {
et.Tick(100)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 0, cnrStopper.called, "invalid container stop handler calls")
et.Tick(101)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(102)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(103)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
var h uint32
for h = 104; h < 109; h++ {
et.Tick(h)
require.Equal(t, 0, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
}
et.Tick(109)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(110)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls")
et.Tick(111)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
et.Tick(112)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
et.Tick(113)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
for h = 114; h < 119; h++ {
et.Tick(h)
require.Equal(t, 1, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
}
et.Tick(120)
require.Equal(t, 2, neh.called, "invalid new epoch handler calls")
+ require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls")
+}
+
+type testAlphabetState struct {
+ isAlphabet bool
+}
+
+func (s *testAlphabetState) IsAlphabet() bool {
+ return s.isAlphabet
}
type testNewEpochHandler struct {
@@ -71,6 +100,15 @@ func (h *testNewEpochHandler) Handle() {
h.called++
}
+type testContainerEstStopper struct {
+ called int
+}
+
+func (s *testContainerEstStopper) StopEstimation(_ container.StopEstimationPrm) error {
+ s.called++
+ return nil
+}
+
type testEpochState struct {
counter uint64
duration uint64
@@ -79,7 +117,6 @@ type testEpochState struct {
func (s *testEpochState) EpochCounter() uint64 {
return s.counter
}
-
func (s *testEpochState) EpochDuration() uint64 {
return s.duration
}
diff --git a/pkg/innerring/config/fee_test.go b/pkg/innerring/config/fee_test.go
index ced21b238..f7330c6ca 100644
--- a/pkg/innerring/config/fee_test.go
+++ b/pkg/innerring/config/fee_test.go
@@ -60,4 +60,5 @@ fee:
require.Equal(t, fixedn.Fixed8(10), config.MainChainFee(), "main chain fee invalid")
require.Equal(t, fixedn.Fixed8(0), config.SideChainFee(), "side chain fee invalid")
})
+
}
diff --git a/pkg/innerring/fetcher.go b/pkg/innerring/fetcher.go
index 7deec3f31..4a80ebf3b 100644
--- a/pkg/innerring/fetcher.go
+++ b/pkg/innerring/fetcher.go
@@ -1,8 +1,6 @@
package innerring
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -49,12 +47,12 @@ type IrFetcherWithoutNotary struct {
// InnerRingKeys fetches list of innerring keys from NeoFSAlphabet
// role in the sidechain.
-func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) {
- return fN.cli.NeoFSAlphabetList(ctx)
+func (fN IrFetcherWithNotary) InnerRingKeys() (keys.PublicKeys, error) {
+ return fN.cli.NeoFSAlphabetList()
}
// InnerRingKeys fetches list of innerring keys from netmap contract
// in the sidechain.
-func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) {
- return f.nm.GetInnerRingList(ctx)
+func (f IrFetcherWithoutNotary) InnerRingKeys() (keys.PublicKeys, error) {
+ return f.nm.GetInnerRingList()
}
diff --git a/pkg/innerring/indexer.go b/pkg/innerring/indexer.go
index 439400bac..ac5fb93ef 100644
--- a/pkg/innerring/indexer.go
+++ b/pkg/innerring/indexer.go
@@ -1,7 +1,7 @@
package innerring
import (
- "context"
+ "bytes"
"fmt"
"sync"
"time"
@@ -11,7 +11,7 @@ import (
type (
irFetcher interface {
- InnerRingKeys(ctx context.Context) (keys.PublicKeys, error)
+ InnerRingKeys() (keys.PublicKeys, error)
}
committeeFetcher interface {
@@ -46,7 +46,7 @@ func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicK
}
}
-func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) {
+func (s *innerRingIndexer) update() (ind indexes, err error) {
s.RLock()
if time.Since(s.lastAccess) < s.timeout {
@@ -63,7 +63,7 @@ func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error)
return s.ind, nil
}
- innerRing, err := s.irFetcher.InnerRingKeys(ctx)
+ innerRing, err := s.irFetcher.InnerRingKeys()
if err != nil {
return indexes{}, err
}
@@ -82,8 +82,8 @@ func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error)
return s.ind, nil
}
-func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) {
- ind, err := s.update(ctx)
+func (s *innerRingIndexer) InnerRingIndex() (int32, error) {
+ ind, err := s.update()
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
@@ -91,8 +91,8 @@ func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) {
return ind.innerRingIndex, nil
}
-func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) {
- ind, err := s.update(ctx)
+func (s *innerRingIndexer) InnerRingSize() (int32, error) {
+ ind, err := s.update()
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
@@ -100,8 +100,8 @@ func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) {
return ind.innerRingSize, nil
}
-func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) {
- ind, err := s.update(ctx)
+func (s *innerRingIndexer) AlphabetIndex() (int32, error) {
+ ind, err := s.update()
if err != nil {
return 0, fmt.Errorf("can't update index state: %w", err)
}
@@ -111,11 +111,16 @@ func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) {
// keyPosition returns "-1" if key is not found in the list, otherwise returns
// index of the key.
-func keyPosition(key *keys.PublicKey, list keys.PublicKeys) int32 {
+func keyPosition(key *keys.PublicKey, list keys.PublicKeys) (result int32) {
+ result = -1
+ rawBytes := key.Bytes()
+
for i := range list {
- if key.Equal(list[i]) {
- return int32(i)
+ if bytes.Equal(list[i].Bytes(), rawBytes) {
+ result = int32(i)
+ break
}
}
- return -1
+
+ return result
}
diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go
index f8201b7df..1937f7a49 100644
--- a/pkg/innerring/indexer_test.go
+++ b/pkg/innerring/indexer_test.go
@@ -1,7 +1,6 @@
package innerring
import (
- "context"
"fmt"
"sync/atomic"
"testing"
@@ -38,15 +37,15 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex(context.Background())
+ idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(2), idx, "invalid IR index")
- size, err := indexer.InnerRingSize(context.Background())
+ size, err := indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(3), size, "invalid IR size")
})
@@ -57,11 +56,11 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex(context.Background())
+ idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(0), idx, "invalid IR index")
})
@@ -72,11 +71,11 @@ func TestIndexerReturnsIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex(context.Background())
+ idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(0), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
})
@@ -101,30 +100,30 @@ func TestIndexerCachesIndexes(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex(context.Background())
+ idx, err := indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
- size, err := indexer.InnerRingSize(context.Background())
+ size, err := indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size")
require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count")
require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count")
- idx, err = indexer.AlphabetIndex(context.Background())
+ idx, err = indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
- size, err = indexer.InnerRingSize(context.Background())
+ size, err = indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size")
@@ -133,15 +132,15 @@ func TestIndexerCachesIndexes(t *testing.T) {
time.Sleep(2 * time.Second)
- idx, err = indexer.AlphabetIndex(context.Background())
+ idx, err = indexer.AlphabetIndex()
require.NoError(t, err, "failed to get alphabet index")
require.Equal(t, int32(-1), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.NoError(t, err, "failed to get IR index")
require.Equal(t, int32(-1), idx, "invalid IR index")
- size, err = indexer.InnerRingSize(context.Background())
+ size, err = indexer.InnerRingSize()
require.NoError(t, err, "failed to get IR size")
require.Equal(t, int32(0), size, "invalid IR size")
@@ -166,15 +165,15 @@ func TestIndexerThrowsErrors(t *testing.T) {
indexer := newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err := indexer.AlphabetIndex(context.Background())
+ idx, err := indexer.AlphabetIndex()
require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed")
require.Equal(t, int32(0), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed")
require.Equal(t, int32(0), idx, "invalid IR index")
- size, err := indexer.InnerRingSize(context.Background())
+ size, err := indexer.InnerRingSize()
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed")
require.Equal(t, int32(0), size, "invalid IR size")
@@ -190,15 +189,15 @@ func TestIndexerThrowsErrors(t *testing.T) {
indexer = newInnerRingIndexer(cf, irf, key, time.Second)
- idx, err = indexer.AlphabetIndex(context.Background())
+ idx, err = indexer.AlphabetIndex()
require.ErrorContains(t, err, "test IR error", "error from commitee not throwed")
require.Equal(t, int32(0), idx, "invalid alphabet index")
- idx, err = indexer.InnerRingIndex(context.Background())
+ idx, err = indexer.InnerRingIndex()
require.ErrorContains(t, err, "test IR error", "error from IR not throwed")
require.Equal(t, int32(0), idx, "invalid IR index")
- size, err = indexer.InnerRingSize(context.Background())
+ size, err = indexer.InnerRingSize()
require.ErrorContains(t, err, "test IR error", "error from IR not throwed")
require.Equal(t, int32(0), size, "invalid IR size")
}
@@ -220,27 +219,7 @@ type testIRFetcher struct {
calls atomic.Int32
}
-func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
+func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
f.calls.Add(1)
return f.keys, f.err
}
-
-func BenchmarkKeyPosition(b *testing.B) {
- list := make(keys.PublicKeys, 7)
- for i := range list {
- p, err := keys.NewPrivateKey()
- require.NoError(b, err)
- list[i] = p.PublicKey()
- }
-
- key := new(keys.PublicKey)
- require.NoError(b, key.DecodeBytes(list[5].Bytes()))
-
- b.ResetTimer()
- b.ReportAllocs()
- for range b.N {
- if keyPosition(key, list) != 5 {
- b.FailNow()
- }
- }
-}
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index 3d236641e..84112d121 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -5,7 +5,6 @@ import (
"encoding/hex"
"fmt"
"net"
- "sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
@@ -27,7 +26,6 @@ import (
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
controlsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/spf13/viper"
@@ -35,30 +33,34 @@ import (
"google.golang.org/grpc"
)
-func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
- alphaSync event.Handler,
-) error {
- locodeValidator := s.newLocodeValidator(cfg)
+func (s *Server) initNetmapProcessor(cfg *viper.Viper,
+ cnrClient *container.Client,
+ alphaSync event.Handler) error {
+ locodeValidator, err := s.newLocodeValidator(cfg)
+ if err != nil {
+ return err
+ }
+
+ if err != nil {
+ return err
+ }
netSettings := (*networkSettings)(s.netmapClient)
var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator
netMapCandidateStateValidator.SetNetworkSettings(netSettings)
- poolSize := cfg.GetInt("workers.netmap")
- s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))
-
- var err error
s.netmapProcessor, err = netmap.New(&netmap.Params{
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
- PoolSize: poolSize,
+ PoolSize: cfg.GetInt("workers.netmap"),
NetmapClient: netmap.NewNetmapClient(s.netmapClient),
EpochTimer: s,
EpochState: s,
AlphabetState: s,
CleanupEnabled: cfg.GetBool("netmap_cleaner.enabled"),
CleanupThreshold: cfg.GetUint64("netmap_cleaner.threshold"),
+ ContainerWrapper: cnrClient,
NotaryDepositHandler: s.onlyAlphabetEventHandler(
s.notaryHandler,
),
@@ -73,6 +75,7 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
NodeStateSettings: netSettings,
})
+
if err != nil {
return err
}
@@ -98,7 +101,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil {
fromMainChainBlock = 0
- s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err))
+ s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
}
mainnetChain.from = fromMainChainBlock
@@ -138,12 +141,12 @@ func (s *Server) enableNotarySupport() error {
return nil
}
-func (s *Server) initNotaryConfig(ctx context.Context) {
+func (s *Server) initNotaryConfig() {
s.mainNotaryConfig = notaryConfigs(
!s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too
)
- s.log.Info(ctx, logs.InnerringNotarySupport,
+ s.log.Info(logs.InnerringNotarySupport,
zap.Bool("sidechain_enabled", true),
zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
)
@@ -153,15 +156,16 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
var alphaSync event.Handler
if s.withoutMainNet || cfg.GetBool("governance.disable") {
- alphaSync = func(ctx context.Context, _ event.Event) {
- s.log.Debug(ctx, logs.InnerringAlphabetKeysSyncIsDisabled)
+ alphaSync = func(event.Event) {
+ s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled)
}
} else {
// create governance processor
governanceProcessor, err := governance.New(&governance.Params{
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
FrostFSClient: frostfsCli,
+ NetmapClient: s.netmapClient,
AlphabetState: s,
EpochState: s,
Voter: s,
@@ -197,16 +201,21 @@ func (s *Server) createIRFetcher() irFetcher {
return irf
}
-func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) {
+func (s *Server) initTimers(cfg *viper.Viper, morphClients *serverMorphClients) {
s.epochTimer = newEpochTimer(&epochTimerArgs{
- newEpochHandlers: s.newEpochTickHandlers(ctx),
- epoch: s,
+ l: s.log,
+ alphabetState: s,
+ newEpochHandlers: s.newEpochTickHandlers(),
+ cnrWrapper: morphClients.CnrClient,
+ epoch: s,
+ stopEstimationDMul: cfg.GetUint32("timers.stop_estimation.mul"),
+ stopEstimationDDiv: cfg.GetUint32("timers.stop_estimation.div"),
})
s.addBlockTimer(s.epochTimer)
// initialize emission timer
- emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{
+ emissionTimer := newEmissionTimer(&emitTimerArgs{
ap: s.alphabetProcessor,
emitDuration: cfg.GetUint32("timers.emit"),
})
@@ -214,20 +223,18 @@ func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) {
s.addBlockTimer(emissionTimer)
}
-func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error {
+func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets"))
if err != nil {
return err
}
- poolSize := cfg.GetInt("workers.alphabet")
- s.log.Debug(ctx, logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize))
// create alphabet processor
s.alphabetProcessor, err = alphabet.New(&alphabet.Params{
ParsedWallets: parsedWallets,
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
- PoolSize: poolSize,
+ PoolSize: cfg.GetInt("workers.alphabet"),
AlphabetContracts: s.contracts.alphabet,
NetmapClient: s.netmapClient,
MorphClient: s.morphClient,
@@ -242,14 +249,13 @@ func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) er
return err
}
-func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
- poolSize := cfg.GetInt("workers.container")
- s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize))
+func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client,
+ frostfsIDClient *frostfsid.Client) error {
// container processor
containerProcessor, err := cont.New(&cont.Params{
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
- PoolSize: poolSize,
+ PoolSize: cfg.GetInt("workers.container"),
AlphabetState: s,
ContainerClient: cnrClient,
MorphClient: cnrClient.Morph(),
@@ -263,14 +269,12 @@ func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, c
return bindMorphProcessor(containerProcessor, s)
}
-func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
- poolSize := cfg.GetInt("workers.balance")
- s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize))
+func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
// create balance processor
balanceProcessor, err := balance.New(&balance.Params{
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
- PoolSize: poolSize,
+ PoolSize: cfg.GetInt("workers.balance"),
FrostFSClient: frostfsCli,
BalanceSC: s.contracts.balance,
AlphabetState: s,
@@ -283,18 +287,17 @@ func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, fro
return bindMorphProcessor(balanceProcessor, s)
}
-func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error {
+func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper, frostfsIDClient *frostfsid.Client) error {
if s.withoutMainNet {
return nil
}
- poolSize := cfg.GetInt("workers.frostfs")
- s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize))
frostfsProcessor, err := frostfs.New(&frostfs.Params{
- Log: s.log.WithTag(logger.TagProcessor),
+ Log: s.log,
Metrics: s.irMetrics,
- PoolSize: poolSize,
+ PoolSize: cfg.GetInt("workers.frostfs"),
FrostFSContract: s.contracts.frostfs,
+ FrostFSIDClient: frostfsIDClient,
BalanceClient: s.balanceClient,
NetmapClient: s.netmapClient,
MorphClient: s.morphClient,
@@ -313,10 +316,10 @@ func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Vip
return bindMainnetProcessor(frostfsProcessor, s)
}
-func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
+func (s *Server) initGRPCServer(cfg *viper.Viper) error {
controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
if controlSvcEndpoint == "" {
- s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified)
+ s.log.Info(logs.InnerringNoControlServerEndpointSpecified)
return nil
}
@@ -340,9 +343,9 @@ func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logg
p.SetPrivateKey(*s.key)
p.SetHealthChecker(s)
- controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient,
+ controlSvc := controlsrv.New(p, s.netmapClient,
controlsrv.WithAllowedKeys(authKeys),
- ), log.WithTag(logger.TagGrpcSvc), audit)
+ )
grpcControlSrv := grpc.NewServer()
control.RegisterControlServiceServer(grpcControlSrv, controlSvc)
@@ -378,6 +381,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
// form morph container client's options
morphCnrOpts := make([]container.Option, 0, 3)
morphCnrOpts = append(morphCnrOpts,
+ container.TryNotary(),
container.AsAlphabet(),
)
@@ -385,19 +389,18 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
if err != nil {
return nil, err
}
- s.containerClient = result.CnrClient
- s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet())
+ s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet())
if err != nil {
return nil, err
}
- s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet())
+ s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet())
if err != nil {
return nil, err
}
- result.FrostFSIDClient, err = frostfsid.NewFromMorph(s.morphClient, s.contracts.frostfsID, fee)
+ result.FrostFSIDClient, err = frostfsid.NewFromMorph(s.morphClient, s.contracts.frostfsID, fee, frostfsid.TryNotary(), frostfsid.AsAlphabet())
if err != nil {
return nil, err
}
@@ -411,7 +414,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
return result, nil
}
-func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error {
+func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) error {
irf := s.createIRFetcher()
s.statusIndex = newInnerRingIndexer(
@@ -426,27 +429,27 @@ func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClie
return err
}
- err = s.initNetmapProcessor(ctx, cfg, alphaSync)
+ err = s.initNetmapProcessor(cfg, morphClients.CnrClient, alphaSync)
if err != nil {
return err
}
- err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
+ err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
if err != nil {
return err
}
- err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient)
+ err = s.initBalanceProcessor(cfg, morphClients.FrostFSClient)
if err != nil {
return err
}
- err = s.initFrostFSMainnetProcessor(ctx, cfg)
+ err = s.initFrostFSMainnetProcessor(cfg, morphClients.FrostFSIDClient)
if err != nil {
return err
}
- err = s.initAlphabetProcessor(ctx, cfg)
+ err = s.initAlphabetProcessor(cfg)
return err
}
@@ -454,17 +457,16 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
- s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
+ s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
morphChain := &chainParams{
- log: s.log.WithTag(logger.TagMorph),
+ log: s.log,
cfg: cfg,
key: s.key,
name: morphPrefix,
from: fromSideChainBlock,
morphCacheMetric: s.irMetrics.MorphCacheMetrics(),
- multinetMetrics: s.irMetrics.Multinet(),
}
// create morph client
@@ -479,7 +481,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
return nil, err
}
if err := s.morphClient.SetGroupSignerScope(); err != nil {
- morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
+ morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
}
return morphChain, nil
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index 3a5137261..1567e40d3 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -8,16 +8,14 @@ import (
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
- internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
balanceClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
@@ -25,7 +23,6 @@ import (
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/precision"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -49,17 +46,16 @@ type (
epochTimer *timer.BlockTimer
// global state
- morphClient *client.Client
- mainnetClient *client.Client
- epochCounter atomic.Uint64
- epochDuration atomic.Uint64
- statusIndex *innerRingIndexer
- precision precision.Fixed8Converter
- healthStatus atomic.Int32
- balanceClient *balanceClient.Client
- netmapClient *nmClient.Client
- persistate *state.PersistentStorage
- containerClient *container.Client
+ morphClient *client.Client
+ mainnetClient *client.Client
+ epochCounter atomic.Uint64
+ epochDuration atomic.Uint64
+ statusIndex *innerRingIndexer
+ precision precision.Fixed8Converter
+ healthStatus atomic.Int32
+ balanceClient *balanceClient.Client
+ netmapClient *nmClient.Client
+ persistate *state.PersistentStorage
// metrics
irMetrics *metrics.InnerRingServiceMetrics
@@ -70,11 +66,11 @@ type (
// internal variables
key *keys.PrivateKey
+ pubKey []byte
contracts *contracts
predefinedValidators keys.PublicKeys
initialEpochTickDelta uint32
withoutMainNet bool
- sdNotify bool
// runtime processors
netmapProcessor *netmap.Processor
@@ -103,10 +99,6 @@ type (
// should report start errors
// to the application.
runners []func(chan<- error) error
-
- // cmode used for upgrade scenario.
- // nolint:unused
- cmode *atomic.Bool
}
chainParams struct {
@@ -117,7 +109,6 @@ type (
sgn *transaction.Signer
from uint32 // block height
morphCacheMetric metrics.MorphCacheMetrics
- multinetMetrics metrics.MultinetMetrics
}
)
@@ -140,10 +131,10 @@ var (
// Start runs all event providers.
func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
- s.setHealthStatus(ctx, control.HealthStatus_STARTING)
+ s.setHealthStatus(control.HealthStatus_STARTING)
defer func() {
if err == nil {
- s.setHealthStatus(ctx, control.HealthStatus_READY)
+ s.setHealthStatus(control.HealthStatus_READY)
}
}()
@@ -152,12 +143,12 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
return err
}
- err = s.initConfigFromBlockchain(ctx)
+ err = s.initConfigFromBlockchain()
if err != nil {
return err
}
- if s.IsAlphabet(ctx) {
+ if s.IsAlphabet() {
err = s.initMainNotary(ctx)
if err != nil {
return err
@@ -173,14 +164,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
prm.Validators = s.predefinedValidators
// vote for sidechain validator if it is prepared in config
- err = s.voteForSidechainValidator(ctx, prm)
+ err = s.voteForSidechainValidator(prm)
if err != nil {
// we don't stop inner ring execution on this error
- s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
- zap.Error(err))
+ s.log.Warn(logs.InnerringCantVoteForPreparedValidators,
+ zap.String("error", err.Error()))
}
- s.tickInitialExpoch(ctx)
+ s.tickInitialExpoch()
morphErr := make(chan error)
mainnnetErr := make(chan error)
@@ -217,14 +208,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
}
func (s *Server) registerMorphNewBlockEventHandler() {
- s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
- s.log.Debug(ctx, logs.InnerringNewBlock,
+ s.morphListener.RegisterBlockHandler(func(b *block.Block) {
+ s.log.Debug(logs.InnerringNewBlock,
zap.Uint32("index", b.Index),
)
err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
if err != nil {
- s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
+ s.log.Warn(logs.InnerringCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", b.Index))
}
@@ -235,10 +226,10 @@ func (s *Server) registerMorphNewBlockEventHandler() {
func (s *Server) registerMainnetNewBlockEventHandler() {
if !s.withoutMainNet {
- s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
+ s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
if err != nil {
- s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
+ s.log.Warn(logs.InnerringCantUpdatePersistentState,
zap.String("chain", "main"),
zap.Uint32("block_index", b.Index))
}
@@ -283,11 +274,11 @@ func (s *Server) initSideNotary(ctx context.Context) error {
)
}
-func (s *Server) tickInitialExpoch(ctx context.Context) {
+func (s *Server) tickInitialExpoch() {
initialEpochTicker := timer.NewOneTickTimer(
timer.StaticBlockMeter(s.initialEpochTickDelta),
func() {
- s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
+ s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
})
s.addBlockTimer(initialEpochTicker)
}
@@ -299,16 +290,16 @@ func (s *Server) startWorkers(ctx context.Context) {
}
// Stop closes all subscription channels.
-func (s *Server) Stop(ctx context.Context) {
- s.setHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
+func (s *Server) Stop() {
+ s.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)
go s.morphListener.Stop()
go s.mainnetListener.Stop()
for _, c := range s.closers {
if err := c(); err != nil {
- s.log.Warn(ctx, logs.InnerringCloserError,
- zap.Error(err),
+ s.log.Warn(logs.InnerringCloserError,
+ zap.String("error", err.Error()),
)
}
}
@@ -334,22 +325,14 @@ func (s *Server) registerStarter(f func() error) {
}
// New creates instance of inner ring sever structure.
-func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan<- error,
- metrics *metrics.InnerRingServiceMetrics, cmode *atomic.Bool, audit *atomic.Bool,
-) (*Server, error) {
+func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan<- error) (*Server, error) {
var err error
server := &Server{
- log: log.WithTag(logger.TagIr),
- irMetrics: metrics,
- cmode: cmode,
+ log: log,
+ irMetrics: metrics.NewInnerRingMetrics(),
}
- server.sdNotify, err = server.initSdNotify(cfg)
- if err != nil {
- return nil, err
- }
-
- server.setHealthStatus(ctx, control.HealthStatus_HEALTH_STATUS_UNDEFINED)
+ server.setHealthStatus(control.HealthStatus_HEALTH_STATUS_UNDEFINED)
// parse notary support
server.feeConfig = config.NewFeeConfig(cfg)
@@ -376,7 +359,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
- server.initNotaryConfig(ctx)
+ server.initNotaryConfig()
err = server.initContracts(cfg)
if err != nil {
@@ -394,20 +377,22 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, fmt.Errorf("ir: can't parse predefined validators list: %w", err)
}
+ server.pubKey = server.key.PublicKey().Bytes()
+
var morphClients *serverMorphClients
morphClients, err = server.initClientsFromMorph()
if err != nil {
return nil, err
}
- err = server.initProcessors(ctx, cfg, morphClients)
+ err = server.initProcessors(cfg, morphClients)
if err != nil {
return nil, err
}
- server.initTimers(ctx, cfg)
+ server.initTimers(cfg, morphClients)
- err = server.initGRPCServer(ctx, cfg, log, audit)
+ err = server.initGRPCServer(cfg)
if err != nil {
return nil, err
}
@@ -415,13 +400,6 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return server, nil
}
-func (s *Server) initSdNotify(cfg *viper.Viper) (bool, error) {
- if cfg.GetBool("systemdnotify.enabled") {
- return true, sdnotify.InitSocket()
- }
- return false, nil
-}
-
func createListener(ctx context.Context, cli *client.Client, p *chainParams) (event.Listener, error) {
var (
sub subscriber.Subscriber
@@ -438,7 +416,7 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev
}
listener, err := event.NewListener(event.ListenerParams{
- Logger: p.log.With(zap.String("chain", p.name)),
+ Logger: &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))},
Subscriber: sub,
})
if err != nil {
@@ -467,20 +445,9 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
priority = defaultPriority
}
- var mtlsConfig *client.MTLSConfig
- rootCAs := p.cfg.GetStringSlice(fmt.Sprintf("%s.%d.trusted_ca_list", section, i))
- if len(rootCAs) != 0 {
- mtlsConfig = &client.MTLSConfig{
- TrustedCAList: rootCAs,
- KeyFile: p.cfg.GetString(fmt.Sprintf("%s.%d.key", section, i)),
- CertFile: p.cfg.GetString(fmt.Sprintf("%s.%d.certificate", section, i)),
- }
- }
-
endpoints = append(endpoints, client.Endpoint{
- Address: addr,
- Priority: priority,
- MTLSConfig: mtlsConfig,
+ Address: addr,
+ Priority: priority,
})
}
@@ -488,12 +455,6 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
return nil, fmt.Errorf("%s chain client endpoints not provided", p.name)
}
- nc := parseMultinetConfig(p.cfg, p.multinetMetrics)
- ds, err := internalNet.NewDialerSource(nc)
- if err != nil {
- return nil, fmt.Errorf("dialer source: %w", err)
- }
-
return client.New(
ctx,
p.key,
@@ -506,7 +467,6 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
}),
client.WithSwitchInterval(p.cfg.GetDuration(p.name+".switch_interval")),
client.WithMorphCacheMetrics(p.morphCacheMetric),
- client.WithDialerSource(ds),
)
}
@@ -551,43 +511,21 @@ func parseWalletAddressesFromStrings(wallets []string) ([]util.Uint160, error) {
return extraWallets, nil
}
-func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNet.Config {
- nc := internalNet.Config{
- Enabled: cfg.GetBool("multinet.enabled"),
- Balancer: cfg.GetString("multinet.balancer"),
- Restrict: cfg.GetBool("multinet.restrict"),
- FallbackDelay: cfg.GetDuration("multinet.fallback_delay"),
- Metrics: m,
- }
- for i := 0; ; i++ {
- mask := cfg.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i))
- if mask == "" {
- break
- }
- sourceIPs := cfg.GetStringSlice(fmt.Sprintf("multinet.subnets.%d.source_ips", i))
- nc.Subnets = append(nc.Subnets, internalNet.Subnet{
- Prefix: mask,
- SourceIPs: sourceIPs,
- })
- }
- return nc
-}
-
-func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
+func (s *Server) initConfigFromBlockchain() error {
// get current epoch
- epoch, err := s.netmapClient.Epoch(ctx)
+ epoch, err := s.netmapClient.Epoch()
if err != nil {
return fmt.Errorf("can't read epoch number: %w", err)
}
// get current epoch duration
- epochDuration, err := s.netmapClient.EpochDuration(ctx)
+ epochDuration, err := s.netmapClient.EpochDuration()
if err != nil {
return fmt.Errorf("can't read epoch duration: %w", err)
}
// get balance precision
- balancePrecision, err := s.balanceClient.Decimals(ctx)
+ balancePrecision, err := s.balanceClient.Decimals()
if err != nil {
return fmt.Errorf("can't read balance contract precision: %w", err)
}
@@ -597,14 +535,14 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
s.precision.SetBalancePrecision(balancePrecision)
// get next epoch delta tick
- s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx)
+ s.initialEpochTickDelta, err = s.nextEpochBlockDelta()
if err != nil {
return err
}
- s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain,
- zap.Bool("active", s.IsActive(ctx)),
- zap.Bool("alphabet", s.IsAlphabet(ctx)),
+ s.log.Debug(logs.InnerringReadConfigFromBlockchain,
+ zap.Bool("active", s.IsActive()),
+ zap.Bool("alphabet", s.IsAlphabet()),
zap.Uint64("epoch", epoch),
zap.Uint32("precision", balancePrecision),
zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta),
@@ -613,8 +551,8 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
return nil
}
-func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) {
- epochBlock, err := s.netmapClient.LastEpochBlock(ctx)
+func (s *Server) nextEpochBlockDelta() (uint32, error) {
+ epochBlock, err := s.netmapClient.LastEpochBlock()
if err != nil {
return 0, fmt.Errorf("can't read last epoch block: %w", err)
}
@@ -635,17 +573,17 @@ func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) {
// onlyAlphabet wrapper around event handler that executes it
// only if inner ring node is alphabet node.
func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler {
- return func(ctx context.Context, ev event.Event) {
- if s.IsAlphabet(ctx) {
- f(ctx, ev)
+ return func(ev event.Event) {
+ if s.IsAlphabet() {
+ f(ev)
}
}
}
-func (s *Server) newEpochTickHandlers(ctx context.Context) []newEpochHandler {
+func (s *Server) newEpochTickHandlers() []newEpochHandler {
newEpochHandlers := []newEpochHandler{
func() {
- s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
+ s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
},
}
diff --git a/pkg/innerring/locode.go b/pkg/innerring/locode.go
index ae4c85168..a9a9498b6 100644
--- a/pkg/innerring/locode.go
+++ b/pkg/innerring/locode.go
@@ -1,15 +1,15 @@
package innerring
import (
- "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
- locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
irlocode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/locode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
+ locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb"
"github.com/spf13/viper"
)
-func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
+func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) {
locodeDB := locodebolt.New(locodebolt.Prm{
Path: cfg.GetString("locode.db.path"),
},
@@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
return irlocode.New(irlocode.Prm{
DB: (*locodeBoltDBWrapper)(locodeDB),
- })
+ }), nil
}
type locodeBoltEntryWrapper struct {
diff --git a/pkg/innerring/netmap.go b/pkg/innerring/netmap.go
index fb11e9426..9961710ca 100644
--- a/pkg/innerring/netmap.go
+++ b/pkg/innerring/netmap.go
@@ -1,7 +1,6 @@
package innerring
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@@ -18,8 +17,8 @@ type networkSettings netmapclient.Client
// MaintenanceModeAllowed requests network configuration from the Sidechain
// and check allowance of storage node's maintenance mode according to it.
// Always returns state.ErrMaintenanceModeDisallowed.
-func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error {
- allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx)
+func (s *networkSettings) MaintenanceModeAllowed() error {
+ allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed()
if err != nil {
return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err)
} else if allowed {
diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go
index c8a69962f..c601f5587 100644
--- a/pkg/innerring/notary.go
+++ b/pkg/innerring/notary.go
@@ -28,39 +28,37 @@ const (
gasDivisor = 2
)
-func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) {
+func (s *Server) depositMainNotary() (tx util.Uint256, err error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err)
}
return s.mainnetClient.DepositNotary(
- ctx,
depositAmount,
uint32(s.epochDuration.Load())+notaryExtraBlocks,
)
}
-func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) {
+func (s *Server) depositSideNotary() (tx util.Uint256, err error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err)
}
- tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount)
- return tx, err
+ return s.morphClient.DepositEndlessNotary(depositAmount)
}
-func (s *Server) notaryHandler(ctx context.Context, _ event.Event) {
+func (s *Server) notaryHandler(_ event.Event) {
if !s.mainNotaryConfig.disabled {
- _, err := s.depositMainNotary(ctx)
+ _, err := s.depositMainNotary()
if err != nil {
- s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
+ s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
}
}
- if _, err := s.depositSideNotary(ctx); err != nil {
- s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
+ if _, err := s.depositSideNotary(); err != nil {
+ s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
}
}
@@ -73,7 +71,7 @@ func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) er
}
func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error {
- tx, err := deposit(ctx)
+ tx, err := deposit()
if err != nil {
return err
}
@@ -82,17 +80,17 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait it.
- s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade)
+ s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade)
return nil
}
- s.log.Info(ctx, msg)
+ s.log.Info(msg)
return await(ctx, tx)
}
func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash util.Uint256) error {
- for range notaryDepositTimeout {
+ for i := 0; i < notaryDepositTimeout; i++ {
select {
case <-ctx.Done():
return ctx.Err()
diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go
index d6b653282..9de075f17 100644
--- a/pkg/innerring/processors/alphabet/handlers.go
+++ b/pkg/innerring/processors/alphabet/handlers.go
@@ -1,8 +1,6 @@
package alphabet
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
@@ -10,16 +8,16 @@ import (
"go.uber.org/zap"
)
-func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) {
+func (ap *Processor) HandleGasEmission(ev event.Event) {
_ = ev.(timers.NewAlphabetEmitTick)
- ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
+ ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
// send event to the worker pool
- err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) })
+ err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit)
if err != nil {
// there system can be moved into controlled degradation stage
- ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained,
+ ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained,
zap.Int("capacity", ap.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go
index 1da3c401d..c098ca27d 100644
--- a/pkg/innerring/processors/alphabet/handlers_test.go
+++ b/pkg/innerring/processors/alphabet/handlers_test.go
@@ -1,13 +1,11 @@
package alphabet_test
import (
- "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -23,7 +21,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}}
alphabetContracts := innerring.NewAlphabetContracts()
- for i := range index + 1 {
+ for i := 0; i <= index; i++ {
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
}
@@ -50,7 +48,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
params := &alphabet.Params{
ParsedWallets: parsedWallets,
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
PoolSize: 2,
StorageEmission: emission,
IRList: &testIndexer{index: index},
@@ -62,7 +60,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
- processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
+ processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@@ -97,10 +95,10 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
t.Parallel()
var emission uint64 = 100_000
var index int = 5
- var parsedWallets []util.Uint160
+ var parsedWallets []util.Uint160 = []util.Uint160{}
alphabetContracts := innerring.NewAlphabetContracts()
- for i := range index + 1 {
+ for i := 0; i <= index; i++ {
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
}
@@ -127,7 +125,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
params := &alphabet.Params{
ParsedWallets: parsedWallets,
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
PoolSize: 2,
StorageEmission: emission,
IRList: &testIndexer{index: index},
@@ -139,7 +137,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
- processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
+ processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@@ -169,16 +167,16 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
t.Parallel()
var emission uint64 = 100_000
var index int = 5
- var parsedWallets []util.Uint160
+ var parsedWallets []util.Uint160 = []util.Uint160{}
alphabetContracts := innerring.NewAlphabetContracts()
- for i := range index + 1 {
+ for i := 0; i <= index; i++ {
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
}
morphClient := &testMorphClient{}
- var nodes []netmap.NodeInfo
+ nodes := []netmap.NodeInfo{}
network := &netmap.NetMap{}
network.SetNodes(nodes)
@@ -188,7 +186,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
params := &alphabet.Params{
ParsedWallets: parsedWallets,
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
PoolSize: 2,
StorageEmission: emission,
IRList: &testIndexer{index: index},
@@ -200,7 +198,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
- processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
+ processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@@ -221,7 +219,7 @@ type testIndexer struct {
index int
}
-func (i *testIndexer) AlphabetIndex(context.Context) int {
+func (i *testIndexer) AlphabetIndex() int {
return i.index
}
@@ -248,7 +246,7 @@ type testMorphClient struct {
batchTransferedGas []batchTransferGas
}
-func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error) {
+func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) error {
c.invokedMethods = append(c.invokedMethods,
invokedMethod{
contract: contract,
@@ -256,9 +254,8 @@ func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee f
method: method,
args: args,
})
- return client.InvokeRes{}, nil
+ return nil
}
-
func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error {
c.transferedGas = append(c.transferedGas, transferGas{
receiver: receiver,
@@ -279,6 +276,6 @@ type testNetmapClient struct {
netmap *netmap.NetMap
}
-func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) {
+func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
return c.netmap, nil
}
diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go
index d3d0f83f2..8a2336011 100644
--- a/pkg/innerring/processors/alphabet/process_emit.go
+++ b/pkg/innerring/processors/alphabet/process_emit.go
@@ -1,7 +1,6 @@
package alphabet
import (
- "context"
"crypto/elliptic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -14,40 +13,40 @@ import (
const emitMethod = "emit"
-func (ap *Processor) processEmit(ctx context.Context) bool {
- index := ap.irList.AlphabetIndex(ctx)
+func (ap *Processor) processEmit() bool {
+ index := ap.irList.AlphabetIndex()
if index < 0 {
- ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
+ ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
return true
}
contract, ok := ap.alphabetContracts.GetByIndex(index)
if !ok {
- ap.log.Debug(ctx, logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
+ ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
zap.Int("index", index))
return false
}
// there is no signature collecting, so we don't need extra fee
- _, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
+ err := ap.morphClient.Invoke(contract, 0, emitMethod)
if err != nil {
- ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
+ ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
return false
}
if ap.storageEmission == 0 {
- ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff)
+ ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff)
return true
}
- networkMap, err := ap.netmapClient.NetMap(ctx)
+ networkMap, err := ap.netmapClient.NetMap()
if err != nil {
- ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
- zap.Error(err))
+ ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
+ zap.String("error", err.Error()))
return false
}
@@ -59,7 +58,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
ap.pwLock.RUnlock()
extraLen := len(pw)
- ap.log.Debug(ctx, logs.AlphabetGasEmission,
+ ap.log.Debug(logs.AlphabetGasEmission,
zap.Int("network_map", nmLen),
zap.Int("extra_wallets", extraLen))
@@ -69,37 +68,37 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen))
- ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode)
+ ap.transferGasToNetmapNodes(nmNodes, gasPerNode)
- ap.transferGasToExtraNodes(ctx, pw, gasPerNode)
+ ap.transferGasToExtraNodes(pw, gasPerNode)
return true
}
-func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
+func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
for i := range nmNodes {
keyBytes := nmNodes[i].PublicKey()
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
- ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
- zap.Error(err))
+ ap.log.Warn(logs.AlphabetCantParseNodePublicKey,
+ zap.String("error", err.Error()))
continue
}
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
if err != nil {
- ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
+ ap.log.Warn(logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
}
}
}
-func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) {
+func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixedn.Fixed8) {
if len(pw) > 0 {
err := ap.morphClient.BatchTransferGas(pw, gasPerNode)
if err != nil {
@@ -107,10 +106,10 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint
for i, addr := range pw {
receiversLog[i] = addr.StringLE()
}
- ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
+ ap.log.Warn(logs.AlphabetCantTransferGasToWallet,
zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
}
}
diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go
index 0aea74003..04dde80f7 100644
--- a/pkg/innerring/processors/alphabet/processor.go
+++ b/pkg/innerring/processors/alphabet/processor.go
@@ -1,26 +1,26 @@
package alphabet
import (
- "context"
"errors"
"fmt"
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
)
type (
// Indexer is a callback interface for inner ring global state.
Indexer interface {
- AlphabetIndex(context.Context) int
+ AlphabetIndex() int
}
// Contracts is an interface of the storage
@@ -36,11 +36,11 @@ type (
}
netmapClient interface {
- NetMap(ctx context.Context) (*netmap.NetMap, error)
+ NetMap() (*netmap.NetMap, error)
}
morphClient interface {
- Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error)
+ Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) error
TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error
}
@@ -85,6 +85,8 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/alphabet: global state is not set")
}
+ p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
+
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
@@ -114,6 +116,11 @@ func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) {
ap.pwLock.Unlock()
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ return nil
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil
diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go
index b5d05a02e..e39f3abbd 100644
--- a/pkg/innerring/processors/balance/handlers.go
+++ b/pkg/innerring/processors/balance/handlers.go
@@ -1,7 +1,6 @@
package balance
import (
- "context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -11,20 +10,20 @@ import (
"go.uber.org/zap"
)
-func (bp *Processor) handleLock(ctx context.Context, ev event.Event) {
+func (bp *Processor) handleLock(ev event.Event) {
lock := ev.(balanceEvent.Lock)
- bp.log.Info(ctx, logs.Notification,
+ bp.log.Info(logs.Notification,
zap.String("type", "lock"),
zap.String("value", hex.EncodeToString(lock.ID())))
// send an event to the worker pool
err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool {
- return bp.processLock(ctx, &lock)
+ return bp.processLock(&lock)
})
if err != nil {
// there system can be moved into controlled degradation stage
- bp.log.Warn(ctx, logs.BalanceBalanceWorkerPoolDrained,
+ bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained,
zap.Int("capacity", bp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/balance/handlers_test.go b/pkg/innerring/processors/balance/handlers_test.go
index 0fd23d8ab..3470fba2d 100644
--- a/pkg/innerring/processors/balance/handlers_test.go
+++ b/pkg/innerring/processors/balance/handlers_test.go
@@ -1,7 +1,6 @@
package balance
import (
- "context"
"testing"
"time"
@@ -22,7 +21,7 @@ func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) {
bsc := util.Uint160{100}
processor, err := New(&Params{
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
PoolSize: 2,
FrostFSClient: cl,
BalanceSC: bsc,
@@ -31,7 +30,7 @@ func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) {
})
require.NoError(t, err, "failed to create processor")
- processor.handleLock(context.Background(), balanceEvent.Lock{})
+ processor.handleLock(balanceEvent.Lock{})
for processor.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -48,7 +47,7 @@ func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) {
bsc := util.Uint160{100}
processor, err := New(&Params{
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
PoolSize: 2,
FrostFSClient: cl,
BalanceSC: bsc,
@@ -57,7 +56,7 @@ func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) {
})
require.NoError(t, err, "failed to create processor")
- processor.handleLock(context.Background(), balanceEvent.Lock{})
+ processor.handleLock(balanceEvent.Lock{})
for processor.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -70,11 +69,12 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+func (s *testAlphabetState) IsAlphabet() bool {
return s.isAlphabet
}
-type testPresicionConverter struct{}
+type testPresicionConverter struct {
+}
func (c *testPresicionConverter) ToFixed8(v int64) int64 {
return v
@@ -84,7 +84,7 @@ type testFrostFSContractClient struct {
chequeCalls int
}
-func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error {
+func (c *testFrostFSContractClient) Cheque(p frostfscontract.ChequePrm) error {
c.chequeCalls++
return nil
}
diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go
index 60475908c..1d94fa454 100644
--- a/pkg/innerring/processors/balance/process_assets.go
+++ b/pkg/innerring/processors/balance/process_assets.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
@@ -11,9 +9,9 @@ import (
// Process lock event by invoking Cheque method in main net to send assets
// back to the withdraw issuer.
-func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool {
- if !bp.alphabetState.IsAlphabet(ctx) {
- bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock)
+func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
+ if !bp.alphabetState.IsAlphabet() {
+ bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock)
return true
}
@@ -25,9 +23,9 @@ func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) b
prm.SetLock(lock.LockAccount())
prm.SetHash(lock.TxHash())
- err := bp.frostfsClient.Cheque(ctx, prm)
+ err := bp.frostfsClient.Cheque(prm)
if err != nil {
- bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err))
+ bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go
index 34203b74f..5cc849b5c 100644
--- a/pkg/innerring/processors/balance/processor.go
+++ b/pkg/innerring/processors/balance/processor.go
@@ -1,10 +1,10 @@
package balance
import (
- "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -12,12 +12,13 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet(context.Context) bool
+ IsAlphabet() bool
}
// PrecisionConverter converts balance amount values.
@@ -26,7 +27,7 @@ type (
}
FrostFSClient interface {
- Cheque(ctx context.Context, p frostfscontract.ChequePrm) error
+ Cheque(p frostfscontract.ChequePrm) error
}
// Processor of events produced by balance contract in the morphchain.
@@ -67,6 +68,8 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/balance: balance precision converter is not set")
}
+ p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))
+
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err)
@@ -88,16 +91,32 @@ func New(p *Params) (*Processor, error) {
}, nil
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ var parsers []event.NotificationParserInfo
+
+ // new lock event
+ lock := event.NotificationParserInfo{}
+ lock.SetType(lockNotification)
+ lock.SetScriptHash(bp.balanceSC)
+ lock.SetParser(balanceEvent.ParseLock)
+ parsers = append(parsers, lock)
+
+ return parsers
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- return []event.NotificationHandlerInfo{
- {
- Contract: bp.balanceSC,
- Type: lockNotification,
- Parser: balanceEvent.ParseLock,
- Handlers: []event.Handler{bp.handleLock},
- },
- }
+ var handlers []event.NotificationHandlerInfo
+
+ // lock handler
+ lock := event.NotificationHandlerInfo{}
+ lock.SetType(lockNotification)
+ lock.SetScriptHash(bp.balanceSC)
+ lock.SetHandler(bp.handleLock)
+ handlers = append(handlers, lock)
+
+ return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
diff --git a/pkg/innerring/processors/container/common.go b/pkg/innerring/processors/container/common.go
index 5334b9a1f..97eb6f559 100644
--- a/pkg/innerring/processors/container/common.go
+++ b/pkg/innerring/processors/container/common.go
@@ -1,11 +1,11 @@
package container
import (
- "context"
"crypto/ecdsa"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@@ -46,7 +46,7 @@ type signatureVerificationData struct {
// - v.binPublicKey is a public session key
// - session context corresponds to the container and verb in v
// - session is "alive"
-func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error {
+func (cp *Processor) verifySignature(v signatureVerificationData) error {
var err error
var key frostfsecdsa.PublicKeyRFC6979
keyProvided := v.binPublicKey != nil
@@ -59,7 +59,7 @@ func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificatio
}
if len(v.binTokenSession) > 0 {
- return cp.verifyByTokenSession(ctx, v, &key, keyProvided)
+ return cp.verifyByTokenSession(v, &key, keyProvided)
}
if keyProvided {
@@ -73,13 +73,27 @@ func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificatio
return errors.New("invalid signature calculated by container owner's key")
}
+ } else {
+ var prm frostfsid.AccountKeysPrm
+ prm.SetID(v.ownerContainer)
+
+ ownerKeys, err := cp.idClient.AccountKeys(prm)
+ if err != nil {
+ return fmt.Errorf("receive owner keys %s: %w", v.ownerContainer, err)
+ }
+
+ for i := range ownerKeys {
+ if (*frostfsecdsa.PublicKeyRFC6979)(ownerKeys[i]).Verify(v.signedData, v.signature) {
+ return nil
+ }
+ }
}
return errors.New("signature is invalid or calculated with the key not bound to the container owner")
}
-func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error {
- curEpoch, err := cp.netState.Epoch(ctx)
+func (cp *Processor) checkTokenLifetime(token session.Container) error {
+ curEpoch, err := cp.netState.Epoch()
if err != nil {
return fmt.Errorf("could not read current epoch: %w", err)
}
@@ -91,7 +105,7 @@ func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Conta
return nil
}
-func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
+func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
var tok session.Container
err := tok.Unmarshal(v.binTokenSession)
@@ -119,7 +133,7 @@ func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerifi
return errors.New("owner differs with token owner")
}
- err = cp.checkTokenLifetime(ctx, tok)
+ err = cp.checkTokenLifetime(tok)
if err != nil {
return fmt.Errorf("check session lifetime: %w", err)
}
diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go
index bb038a3cb..3ec10b889 100644
--- a/pkg/innerring/processors/container/handlers.go
+++ b/pkg/innerring/processors/container/handlers.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -12,40 +11,59 @@ import (
"go.uber.org/zap"
)
-func (cp *Processor) handlePut(ctx context.Context, ev event.Event) {
+func (cp *Processor) handlePut(ev event.Event) {
put := ev.(putEvent)
id := sha256.Sum256(put.Container())
- cp.log.Info(ctx, logs.Notification,
+ cp.log.Info(logs.Notification,
zap.String("type", "container put"),
zap.String("id", base58.Encode(id[:])))
// send an event to the worker pool
err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool {
- return cp.processContainerPut(ctx, put)
+ return cp.processContainerPut(put)
})
if err != nil {
// there system can be moved into controlled degradation stage
- cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
+ cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
-func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) {
+func (cp *Processor) handleDelete(ev event.Event) {
del := ev.(containerEvent.Delete)
- cp.log.Info(ctx, logs.Notification,
+ cp.log.Info(logs.Notification,
zap.String("type", "container delete"),
zap.String("id", base58.Encode(del.ContainerID())))
// send an event to the worker pool
err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool {
- return cp.processContainerDelete(ctx, del)
+ return cp.processContainerDelete(del)
})
if err != nil {
// there system can be moved into controlled degradation stage
- cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
+ cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
+ zap.Int("capacity", cp.pool.Cap()))
+ }
+}
+
+func (cp *Processor) handleSetEACL(ev event.Event) {
+ e := ev.(containerEvent.SetEACL)
+
+ cp.log.Info(logs.Notification,
+ zap.String("type", "set EACL"),
+ )
+
+ // send an event to the worker pool
+
+ err := processors.SubmitEvent(cp.pool, cp.metrics, "container_set_eacl", func() bool {
+ return cp.processSetEACL(e)
+ })
+ if err != nil {
+ // there system can be moved into controlled degradation stage
+ cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go
index 1b3842eb0..77dbe876a 100644
--- a/pkg/innerring/processors/container/handlers_test.go
+++ b/pkg/innerring/processors/container/handlers_test.go
@@ -1,15 +1,14 @@
package container
import (
- "context"
"crypto/ecdsa"
"encoding/hex"
"testing"
"time"
- frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -18,8 +17,10 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/network/payload"
@@ -36,13 +37,13 @@ func TestPutEvent(t *testing.T) {
mc := &testMorphClient{}
proc, err := New(&Params{
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
PoolSize: 2,
AlphabetState: &testAlphabetState{isAlphabet: true},
+ FrostFSIDClient: &testIDClient{},
NetworkState: nst,
ContainerClient: &testContainerClient{},
MorphClient: mc,
- FrostFSIDClient: &testFrostFSIDClient{},
})
require.NoError(t, err, "failed to create processor")
@@ -72,7 +73,7 @@ func TestPutEvent(t *testing.T) {
nr: nr,
}
- proc.handlePut(context.Background(), event)
+ proc.handlePut(event)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -94,16 +95,21 @@ func TestDeleteEvent(t *testing.T) {
p, err := keys.NewPrivateKey()
require.NoError(t, err)
+ idc := &testIDClient{
+ publicKeys: []*keys.PublicKey{
+ p.PublicKey(),
+ },
+ }
mc := &testMorphClient{}
proc, err := New(&Params{
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
PoolSize: 2,
AlphabetState: &testAlphabetState{isAlphabet: true},
+ FrostFSIDClient: idc,
NetworkState: nst,
ContainerClient: cc,
MorphClient: mc,
- FrostFSIDClient: &testFrostFSIDClient{},
})
require.NoError(t, err, "failed to create processor")
@@ -133,7 +139,6 @@ func TestDeleteEvent(t *testing.T) {
ContainerIDValue: cidBin,
SignatureValue: p.Sign(cidBin),
NotaryRequestValue: nr,
- PublicKeyValue: p.PublicKey().Bytes(),
}
var signature frostfscrypto.Signature
@@ -144,7 +149,7 @@ func TestDeleteEvent(t *testing.T) {
Signature: signature,
}
- proc.handleDelete(context.Background(), ev)
+ proc.handleDelete(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -157,11 +162,96 @@ func TestDeleteEvent(t *testing.T) {
require.EqualValues(t, []*transaction.Transaction{nr.MainTransaction}, mc.transactions, "invalid notary requests")
}
+func TestSetEACLEvent(t *testing.T) {
+ t.Parallel()
+ nst := &testNetworkState{
+ homHashDisabled: true,
+ epoch: 100,
+ }
+ cc := &testContainerClient{
+ get: make(map[string]*containercore.Container),
+ }
+ mc := &testMorphClient{}
+
+ proc, err := New(&Params{
+ Log: test.NewLogger(t, true),
+ PoolSize: 2,
+ AlphabetState: &testAlphabetState{isAlphabet: true},
+ FrostFSIDClient: &testIDClient{},
+ NetworkState: nst,
+ ContainerClient: cc,
+ MorphClient: mc,
+ })
+ require.NoError(t, err, "failed to create processor")
+
+ p, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ var usr user.ID
+ user.IDFromKey(&usr, (ecdsa.PublicKey)(*p.PublicKey()))
+
+ var pp netmap.PlacementPolicy
+ pp.AddReplicas(netmap.ReplicaDescriptor{})
+
+ var cnr containerSDK.Container
+ cnr.Init()
+ cnr.SetOwner(usr)
+ cnr.SetPlacementPolicy(pp)
+ cnr.SetBasicACL(acl.PrivateExtended)
+ containerSDK.DisableHomomorphicHashing(&cnr)
+
+ var cid cid.ID
+ containerSDK.CalculateID(&cid, cnr)
+ cidBytes := make([]byte, 32)
+ cid.Encode(cidBytes)
+
+ var signature frostfscrypto.Signature
+ signer := frostfsecdsa.Signer(p.PrivateKey)
+ require.NoError(t, signature.Calculate(signer, cidBytes), "failed to calculate signature")
+
+ cc.get[hex.EncodeToString(cidBytes)] = &containercore.Container{
+ Value: cnr,
+ Signature: signature,
+ }
+
+ table := eacl.NewTable()
+ table.SetCID(cid)
+ table.SetVersion(version.Current())
+
+ r := &eacl.Record{}
+ r.AddObjectContainerIDFilter(eacl.MatchStringEqual, cid)
+
+ table.AddRecord(r)
+
+ nr := &payload.P2PNotaryRequest{
+ MainTransaction: &transaction.Transaction{},
+ }
+ event := containerEvent.SetEACL{
+ TableValue: table.ToV2().StableMarshal(nil),
+ PublicKeyValue: p.PublicKey().Bytes(),
+ SignatureValue: p.Sign(table.ToV2().StableMarshal(nil)),
+ NotaryRequestValue: nr,
+ }
+
+ proc.handleSetEACL(event)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ var expectedPutEACL cntClient.PutEACLPrm
+ expectedPutEACL.SetTable(table.ToV2().StableMarshal(nil))
+ expectedPutEACL.SetKey(p.PublicKey().Bytes())
+ expectedPutEACL.SetSignature(p.Sign(table.ToV2().StableMarshal(nil)))
+
+ require.EqualValues(t, []*transaction.Transaction{nr.MainTransaction}, mc.transactions, "invalid notary requests")
+}
+
type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+func (s *testAlphabetState) IsAlphabet() bool {
return s.isAlphabet
}
@@ -170,11 +260,11 @@ type testNetworkState struct {
epoch uint64
}
-func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) {
+func (s *testNetworkState) HomomorphicHashDisabled() (bool, error) {
return s.homHashDisabled, nil
}
-func (s *testNetworkState) Epoch(context.Context) (uint64, error) {
+func (s *testNetworkState) Epoch() (uint64, error) {
return s.epoch, nil
}
@@ -187,7 +277,7 @@ func (c *testContainerClient) ContractAddress() util.Uint160 {
return c.contractAddress
}
-func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
+func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error) {
key := hex.EncodeToString(cid)
if cont, found := c.get[key]; found {
return cont, nil
@@ -195,6 +285,14 @@ func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containerco
return nil, new(apistatus.ContainerNotFound)
}
+type testIDClient struct {
+ publicKeys keys.PublicKeys
+}
+
+func (c *testIDClient) AccountKeys(p frostfsid.AccountKeysPrm) (keys.PublicKeys, error) {
+ return c.publicKeys, nil
+}
+
var _ putEvent = &testPutEvent{}
type testPutEvent struct {
@@ -221,7 +319,6 @@ func (e *testPutEvent) Signature() []byte {
func (e *testPutEvent) SessionToken() []byte {
return e.st
}
-
func (e *testPutEvent) NotaryRequest() *payload.P2PNotaryRequest {
return e.nr
}
@@ -234,9 +331,3 @@ func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction)
c.transactions = append(c.transactions, mainTx)
return nil
}
-
-type testFrostFSIDClient struct{}
-
-func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
- return &frostfsidclient.Subject{}, nil
-}
diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go
index 8e4ab2623..2629b9d29 100644
--- a/pkg/innerring/processors/container/process_container.go
+++ b/pkg/innerring/processors/container/process_container.go
@@ -1,10 +1,7 @@
package container
import (
- "context"
- "errors"
"fmt"
- "strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -13,7 +10,6 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"github.com/nspcc-dev/neo-go/pkg/network/payload"
- "github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
)
@@ -33,32 +29,30 @@ type putContainerContext struct {
d containerSDK.Domain
}
-var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner namespaces do not match")
-
// Process a new container from the user by checking the container sanity
// and sending approve tx back to the morph.
-func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool {
- if !cp.alphabetState.IsAlphabet(ctx) {
- cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut)
+func (cp *Processor) processContainerPut(put putEvent) bool {
+ if !cp.alphabetState.IsAlphabet() {
+ cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut)
return true
}
- pctx := &putContainerContext{
+ ctx := &putContainerContext{
e: put,
}
- err := cp.checkPutContainer(ctx, pctx)
+ err := cp.checkPutContainer(ctx)
if err != nil {
- cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
- zap.Error(err),
+ cp.log.Error(logs.ContainerPutContainerCheckFailed,
+ zap.String("error", err.Error()),
)
return false
}
- if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
- cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
- zap.Error(err),
+ if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil {
+ cp.log.Error(logs.ContainerCouldNotApprovePutContainer,
+ zap.String("error", err.Error()),
)
return false
}
@@ -66,8 +60,8 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
return true
}
-func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error {
- binCnr := pctx.e.Container()
+func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
+ binCnr := ctx.e.Container()
var cnr containerSDK.Container
err := cnr.Unmarshal(binCnr)
@@ -75,12 +69,12 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo
return fmt.Errorf("invalid binary container: %w", err)
}
- err = cp.verifySignature(ctx, signatureVerificationData{
+ err = cp.verifySignature(signatureVerificationData{
ownerContainer: cnr.Owner(),
verb: session.VerbContainerPut,
- binTokenSession: pctx.e.SessionToken(),
- binPublicKey: pctx.e.PublicKey(),
- signature: pctx.e.Signature(),
+ binTokenSession: ctx.e.SessionToken(),
+ binPublicKey: ctx.e.PublicKey(),
+ signature: ctx.e.Signature(),
signedData: binCnr,
})
if err != nil {
@@ -88,13 +82,13 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo
}
// check homomorphic hashing setting
- err = checkHomomorphicHashing(ctx, cp.netState, cnr)
+ err = checkHomomorphicHashing(cp.netState, cnr)
if err != nil {
return fmt.Errorf("incorrect homomorphic hashing setting: %w", err)
}
// check native name and zone
- err = cp.checkNNS(ctx, pctx, cnr)
+ err = checkNNS(ctx, cnr)
if err != nil {
return fmt.Errorf("NNS: %w", err)
}
@@ -104,24 +98,24 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo
// Process delete container operation from the user by checking container sanity
// and sending approve tx back to morph.
-func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool {
- if !cp.alphabetState.IsAlphabet(ctx) {
- cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete)
+func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
+ if !cp.alphabetState.IsAlphabet() {
+ cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete)
return true
}
- err := cp.checkDeleteContainer(ctx, e)
+ err := cp.checkDeleteContainer(e)
if err != nil {
- cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
- zap.Error(err),
+ cp.log.Error(logs.ContainerDeleteContainerCheckFailed,
+ zap.String("error", err.Error()),
)
return false
}
if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
- cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
- zap.Error(err),
+ cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer,
+ zap.String("error", err.Error()),
)
return false
@@ -130,7 +124,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
return true
}
-func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error {
+func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error {
binCnr := e.ContainerID()
var idCnr cid.ID
@@ -141,12 +135,12 @@ func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.
}
// receive owner of the related container
- cnr, err := cp.cnrClient.Get(ctx, binCnr)
+ cnr, err := cp.cnrClient.Get(binCnr)
if err != nil {
return fmt.Errorf("could not receive the container: %w", err)
}
- err = cp.verifySignature(ctx, signatureVerificationData{
+ err = cp.verifySignature(signatureVerificationData{
ownerContainer: cnr.Value.Owner(),
verb: session.VerbContainerDelete,
idContainerSet: true,
@@ -163,53 +157,34 @@ func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.
return nil
}
-func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error {
+func checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error {
// fetch domain info
- pctx.d = containerSDK.ReadDomain(cnr)
+ ctx.d = containerSDK.ReadDomain(cnr)
// if PutNamed event => check if values in container correspond to args
- if named, ok := pctx.e.(interface {
+ if named, ok := ctx.e.(interface {
Name() string
Zone() string
}); ok {
- if name := named.Name(); name != pctx.d.Name() {
- return fmt.Errorf("names differ %s/%s", name, pctx.d.Name())
+ if name := named.Name(); name != ctx.d.Name() {
+ return fmt.Errorf("names differ %s/%s", name, ctx.d.Name())
}
- if zone := named.Zone(); zone != pctx.d.Zone() {
- return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone())
+ if zone := named.Zone(); zone != ctx.d.Zone() {
+ return fmt.Errorf("zones differ %s/%s", zone, ctx.d.Zone())
}
}
- addr, err := util.Uint160DecodeBytesBE(cnr.Owner().WalletBytes()[1 : 1+util.Uint160Size])
- if err != nil {
- return fmt.Errorf("could not get container owner address: %w", err)
- }
-
- subject, err := cp.frostFSIDClient.GetSubject(ctx, addr)
- if err != nil {
- return fmt.Errorf("could not get subject from FrostfsID contract: %w", err)
- }
-
- namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns")
- if !hasNamespace {
- return nil
- }
-
- if subject.Namespace != namespace {
- return errContainerAndOwnerNamespaceDontMatch
- }
-
return nil
}
-func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error {
- netSetting, err := ns.HomomorphicHashDisabled(ctx)
+func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error {
+ netSetting, err := ns.HomomorphicHashDisabled()
if err != nil {
return fmt.Errorf("could not get setting in contract: %w", err)
}
- if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting {
+ if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting != cnrSetting {
return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting)
}
diff --git a/pkg/innerring/processors/container/process_eacl.go b/pkg/innerring/processors/container/process_eacl.go
new file mode 100644
index 000000000..8ab0d5c39
--- /dev/null
+++ b/pkg/innerring/processors/container/process_eacl.go
@@ -0,0 +1,82 @@
+package container
+
+import (
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
+ containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "go.uber.org/zap"
+)
+
+func (cp *Processor) processSetEACL(e containerEvent.SetEACL) bool {
+ if !cp.alphabetState.IsAlphabet() {
+ cp.log.Info(logs.ContainerNonAlphabetModeIgnoreSetEACL)
+ return true
+ }
+
+ err := cp.checkSetEACL(e)
+ if err != nil {
+ cp.log.Error(logs.ContainerSetEACLCheckFailed,
+ zap.String("error", err.Error()),
+ )
+
+ return false
+ }
+
+ if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
+ cp.log.Error(logs.ContainerCouldNotApproveSetEACL,
+ zap.String("error", err.Error()),
+ )
+ return false
+ }
+
+ return true
+}
+
+func (cp *Processor) checkSetEACL(e containerEvent.SetEACL) error {
+ binTable := e.Table()
+
+ // unmarshal table
+ table := eacl.NewTable()
+
+ err := table.Unmarshal(binTable)
+ if err != nil {
+ return fmt.Errorf("invalid binary table: %w", err)
+ }
+
+ idCnr, ok := table.CID()
+ if !ok {
+ return errors.New("missing container ID in eACL table")
+ }
+
+ // receive owner of the related container
+ cnr, err := cntClient.Get(cp.cnrClient, idCnr)
+ if err != nil {
+ return fmt.Errorf("could not receive the container: %w", err)
+ }
+
+ // ACL extensions can be disabled by basic ACL, check it
+ if !cnr.Value.BasicACL().Extendable() {
+ return errors.New("ACL extension disabled by container basic ACL")
+ }
+
+ err = cp.verifySignature(signatureVerificationData{
+ ownerContainer: cnr.Value.Owner(),
+ verb: session.VerbContainerSetEACL,
+ idContainerSet: true,
+ idContainer: idCnr,
+ binTokenSession: e.SessionToken(),
+ binPublicKey: e.PublicKey(),
+ signature: e.Signature(),
+ signedData: binTable,
+ })
+ if err != nil {
+ return fmt.Errorf("auth eACL table setting: %w", err)
+ }
+
+ return nil
+}
diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go
index 9be93baa4..fd5348c6f 100644
--- a/pkg/innerring/processors/container/processor.go
+++ b/pkg/innerring/processors/container/processor.go
@@ -1,51 +1,53 @@
package container
import (
- "context"
"errors"
"fmt"
- frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/core/mempoolevent"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet(context.Context) bool
+ IsAlphabet() bool
}
ContClient interface {
ContractAddress() util.Uint160
- Get(ctx context.Context, cid []byte) (*containercore.Container, error)
+ Get(cid []byte) (*containercore.Container, error)
}
MorphClient interface {
NotarySignAndInvokeTX(mainTx *transaction.Transaction) error
}
- FrostFSIDClient interface {
- GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error)
+ IDClient interface {
+ AccountKeys(p frostfsid.AccountKeysPrm) (keys.PublicKeys, error)
}
// Processor of events produced by container contract in the sidechain.
Processor struct {
- log *logger.Logger
- metrics metrics.Register
- pool *ants.Pool
- alphabetState AlphabetState
- cnrClient ContClient // notary must be enabled
- morphClient MorphClient
- netState NetworkState
- frostFSIDClient FrostFSIDClient
+ log *logger.Logger
+ metrics metrics.Register
+ pool *ants.Pool
+ alphabetState AlphabetState
+ cnrClient ContClient // notary must be enabled
+ morphClient MorphClient
+ idClient IDClient
+ netState NetworkState
}
// Params of the processor constructor.
@@ -56,8 +58,8 @@ type (
AlphabetState AlphabetState
ContainerClient ContClient
MorphClient MorphClient
+ FrostFSIDClient IDClient
NetworkState NetworkState
- FrostFSIDClient FrostFSIDClient
}
)
@@ -68,7 +70,7 @@ type NetworkState interface {
//
// Must return any error encountered
// which did not allow reading the value.
- Epoch(ctx context.Context) (uint64, error)
+ Epoch() (uint64, error)
// HomomorphicHashDisabled must return boolean that
// represents homomorphic network state:
@@ -76,7 +78,7 @@ type NetworkState interface {
// * false if hashing is enabled.
//
// which did not allow reading the value.
- HomomorphicHashDisabled(ctx context.Context) (bool, error)
+ HomomorphicHashDisabled() (bool, error)
}
// New creates a container contract processor instance.
@@ -90,12 +92,14 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/container: Container client is not set")
case p.MorphClient == nil:
return nil, errors.New("ir/container: Morph client is not set")
+ case p.FrostFSIDClient == nil:
+ return nil, errors.New("ir/container: FrostFS ID client is not set")
case p.NetworkState == nil:
return nil, errors.New("ir/container: network state is not set")
- case p.FrostFSIDClient == nil:
- return nil, errors.New("ir/container: FrostFSID client is not set")
}
+ p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))
+
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err)
@@ -107,17 +111,22 @@ func New(p *Params) (*Processor, error) {
}
return &Processor{
- log: p.Log,
- metrics: metricsRegister,
- pool: pool,
- alphabetState: p.AlphabetState,
- cnrClient: p.ContainerClient,
- netState: p.NetworkState,
- morphClient: p.MorphClient,
- frostFSIDClient: p.FrostFSIDClient,
+ log: p.Log,
+ metrics: metricsRegister,
+ pool: pool,
+ alphabetState: p.AlphabetState,
+ cnrClient: p.ContainerClient,
+ idClient: p.FrostFSIDClient,
+ netState: p.NetworkState,
+ morphClient: p.MorphClient,
}, nil
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ return nil
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
return nil
@@ -149,6 +158,11 @@ func (cp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
p.SetParser(containerEvent.ParseDeleteNotary)
pp = append(pp, p)
+ // set EACL
+ p.SetRequestType(containerEvent.SetEACLNotaryEvent)
+ p.SetParser(containerEvent.ParseSetEACLNotary)
+ pp = append(pp, p)
+
return pp
}
@@ -177,5 +191,10 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
h.SetHandler(cp.handleDelete)
hh = append(hh, h)
+ // set eACL
+ h.SetRequestType(containerEvent.SetEACLNotaryEvent)
+ h.SetHandler(cp.handleSetEACL)
+ hh = append(hh, h)
+
return hh
}
diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go
index 936de2e77..ab53d5c48 100644
--- a/pkg/innerring/processors/frostfs/handlers.go
+++ b/pkg/innerring/processors/frostfs/handlers.go
@@ -1,79 +1,73 @@
package frostfs
import (
- "bytes"
- "context"
"encoding/hex"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
+ "github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.uber.org/zap"
)
-func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) {
+func (np *Processor) handleDeposit(ev event.Event) {
deposit := ev.(frostfsEvent.Deposit)
- depositIDBin := bytes.Clone(deposit.ID())
- slices.Reverse(depositIDBin)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "deposit"),
- zap.String("id", hex.EncodeToString(depositIDBin)))
+ zap.String("id", hex.EncodeToString(slice.CopyReverse(deposit.ID()))))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool {
- return np.processDeposit(ctx, deposit)
+ return np.processDeposit(deposit)
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) {
+func (np *Processor) handleWithdraw(ev event.Event) {
withdraw := ev.(frostfsEvent.Withdraw)
- withdrawBin := bytes.Clone(withdraw.ID())
- slices.Reverse(withdrawBin)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "withdraw"),
- zap.String("id", hex.EncodeToString(withdrawBin)))
+ zap.String("id", hex.EncodeToString(slice.CopyReverse(withdraw.ID()))))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool {
- return np.processWithdraw(ctx, withdraw)
+ return np.processWithdraw(withdraw)
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleCheque(ctx context.Context, ev event.Event) {
+func (np *Processor) handleCheque(ev event.Event) {
cheque := ev.(frostfsEvent.Cheque)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "cheque"),
zap.String("id", hex.EncodeToString(cheque.ID())))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool {
- return np.processCheque(ctx, cheque)
+ return np.processCheque(cheque)
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleConfig(ctx context.Context, ev event.Event) {
+func (np *Processor) handleConfig(ev event.Event) {
cfg := ev.(frostfsEvent.Config)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "set config"),
zap.String("key", hex.EncodeToString(cfg.Key())),
zap.String("value", hex.EncodeToString(cfg.Value())))
@@ -81,11 +75,47 @@ func (np *Processor) handleConfig(ctx context.Context, ev event.Event) {
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool {
- return np.processConfig(ctx, cfg)
+ return np.processConfig(cfg)
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ zap.Int("capacity", np.pool.Cap()))
+ }
+}
+
+func (np *Processor) handleBind(ev event.Event) {
+ e := ev.(frostfsEvent.Bind)
+ np.log.Info(logs.Notification,
+ zap.String("type", "bind"),
+ )
+
+ // send event to the worker pool
+
+ err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_bind", func() bool {
+ return np.processBind(e, true)
+ })
+ if err != nil {
+ // there system can be moved into controlled degradation stage
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+ zap.Int("capacity", np.pool.Cap()))
+ }
+}
+
+func (np *Processor) handleUnbind(ev event.Event) {
+ e := ev.(frostfsEvent.Unbind)
+ np.log.Info(logs.Notification,
+ zap.String("type", "unbind"),
+ )
+
+ // send event to the worker pool
+
+ err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_unbind", func() bool {
+ return np.processBind(e, false)
+ })
+ if err != nil {
+ // there system can be moved into controlled degradation stage
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/frostfs/handlers_test.go b/pkg/innerring/processors/frostfs/handlers_test.go
index 72310f6f9..db7835811 100644
--- a/pkg/innerring/processors/frostfs/handlers_test.go
+++ b/pkg/innerring/processors/frostfs/handlers_test.go
@@ -1,14 +1,16 @@
package frostfs
import (
- "context"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/stretchr/testify/require"
@@ -37,7 +39,7 @@ func TestHandleDeposit(t *testing.T) {
AmountValue: 1000,
}
- proc.handleDeposit(context.Background(), ev)
+ proc.handleDeposit(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -58,7 +60,7 @@ func TestHandleDeposit(t *testing.T) {
es.epochCounter = 109
- proc.handleDeposit(context.Background(), ev)
+ proc.handleDeposit(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -99,7 +101,7 @@ func TestHandleWithdraw(t *testing.T) {
AmountValue: 1000,
}
- proc.handleWithdraw(context.Background(), ev)
+ proc.handleWithdraw(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -140,7 +142,7 @@ func TestHandleCheque(t *testing.T) {
LockValue: util.Uint160{200},
}
- proc.handleCheque(context.Background(), ev)
+ proc.handleCheque(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -177,7 +179,7 @@ func TestHandleConfig(t *testing.T) {
TxHashValue: util.Uint256{100},
}
- proc.handleConfig(context.Background(), ev)
+ proc.handleConfig(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -192,11 +194,80 @@ func TestHandleConfig(t *testing.T) {
require.EqualValues(t, []nmClient.SetConfigPrm{expConfig}, nm.config, "invalid config value")
}
+func TestHandleUnbind(t *testing.T) {
+ t.Parallel()
+ es := &testEpochState{
+ epochCounter: 100,
+ }
+ m := &testMorphClient{
+ balance: 150,
+ }
+ id := &testIDClient{}
+ proc, err := newTestProc(t, func(p *Params) {
+ p.EpochState = es
+ p.MorphClient = m
+ p.FrostFSIDClient = id
+ })
+ require.NoError(t, err, "failed to create processor")
+
+ p, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ evUnbind := frostfsEvent.Unbind{
+ BindCommon: frostfsEvent.BindCommon{
+ UserValue: util.Uint160{49}.BytesBE(),
+ KeysValue: [][]byte{
+ p.PublicKey().Bytes(),
+ },
+ TxHashValue: util.Uint256{100},
+ },
+ }
+
+ proc.handleUnbind(evUnbind)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ var userID user.ID
+ userID.SetScriptHash(util.Uint160{49})
+
+ var expBind frostfsid.CommonBindPrm
+ expBind.SetOwnerID(userID.WalletBytes())
+ expBind.SetKeys(evUnbind.BindCommon.KeysValue)
+ expBind.SetHash(evUnbind.BindCommon.TxHashValue)
+
+ var expNilSlice []frostfsid.CommonBindPrm
+
+ require.EqualValues(t, []frostfsid.CommonBindPrm{expBind}, id.remove, "invalid remove keys value")
+ require.EqualValues(t, expNilSlice, id.add, "invalid add keys value")
+
+ evBind := frostfsEvent.Bind{
+ BindCommon: frostfsEvent.BindCommon{
+ UserValue: util.Uint160{49}.BytesBE(),
+ KeysValue: [][]byte{
+ p.PublicKey().Bytes(),
+ },
+ TxHashValue: util.Uint256{100},
+ },
+ }
+
+ proc.handleBind(evBind)
+
+ for proc.pool.Running() > 0 {
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ require.EqualValues(t, []frostfsid.CommonBindPrm{expBind}, id.remove, "invalid remove keys value")
+ require.EqualValues(t, []frostfsid.CommonBindPrm{expBind}, id.add, "invalid add keys value")
+}
+
func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
p := &Params{
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
PoolSize: 1,
FrostFSContract: util.Uint160{0},
+ FrostFSIDClient: &testIDClient{},
BalanceClient: &testBalaceClient{},
NetmapClient: &testNetmapClient{},
MorphClient: &testMorphClient{},
@@ -226,11 +297,12 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+func (s *testAlphabetState) IsAlphabet() bool {
return s.isAlphabet
}
-type testPrecisionConverter struct{}
+type testPrecisionConverter struct {
+}
func (c *testPrecisionConverter) ToBalancePrecision(v int64) int64 {
return v
@@ -242,17 +314,15 @@ type testBalaceClient struct {
burn []balance.BurnPrm
}
-func (c *testBalaceClient) Mint(_ context.Context, p balance.MintPrm) error {
+func (c *testBalaceClient) Mint(p balance.MintPrm) error {
c.mint = append(c.mint, p)
return nil
}
-
-func (c *testBalaceClient) Lock(_ context.Context, p balance.LockPrm) error {
+func (c *testBalaceClient) Lock(p balance.LockPrm) error {
c.lock = append(c.lock, p)
return nil
}
-
-func (c *testBalaceClient) Burn(_ context.Context, p balance.BurnPrm) error {
+func (c *testBalaceClient) Burn(p balance.BurnPrm) error {
c.burn = append(c.burn, p)
return nil
}
@@ -261,7 +331,7 @@ type testNetmapClient struct {
config []nmClient.SetConfigPrm
}
-func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error {
+func (c *testNetmapClient) SetConfig(p nmClient.SetConfigPrm) error {
c.config = append(c.config, p)
return nil
}
@@ -279,7 +349,6 @@ type testMorphClient struct {
func (c *testMorphClient) GasBalance() (res int64, err error) {
return c.balance, nil
}
-
func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error {
c.transferGas = append(c.transferGas, transferGas{
receiver: receiver,
@@ -287,3 +356,18 @@ func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed
})
return nil
}
+
+type testIDClient struct {
+ add []frostfsid.CommonBindPrm
+ remove []frostfsid.CommonBindPrm
+}
+
+func (c *testIDClient) AddKeys(p frostfsid.CommonBindPrm) error {
+ c.add = append(c.add, p)
+ return nil
+}
+
+func (c *testIDClient) RemoveKeys(args frostfsid.CommonBindPrm) error {
+ c.remove = append(c.remove, args)
+ return nil
+}
diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go
index d10eb9660..c72aeceee 100644
--- a/pkg/innerring/processors/frostfs/process_assets.go
+++ b/pkg/innerring/processors/frostfs/process_assets.go
@@ -1,8 +1,6 @@
package frostfs
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -17,9 +15,9 @@ const (
// Process deposit event by invoking a balance contract and sending native
// gas in the sidechain.
-func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.Deposit) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreDeposit)
+func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit)
return true
}
@@ -30,9 +28,9 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
prm.SetID(deposit.ID())
// send transferX to a balance contract
- err := np.balanceClient.Mint(ctx, prm)
+ err := np.balanceClient.Mint(prm)
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
+ np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
}
curEpoch := np.epochState.EpochCounter()
@@ -46,7 +44,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val+np.mintEmitThreshold >= curEpoch {
- np.log.Warn(ctx, logs.FrostFSDoubleMintEmissionDeclined,
+ np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined,
zap.Stringer("receiver", receiver),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))
@@ -58,12 +56,12 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
// before gas transfer check if the balance is greater than the threshold
balance, err := np.morphClient.GasBalance()
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
+ np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
return false
}
if balance < np.gasBalanceThreshold {
- np.log.Warn(ctx, logs.FrostFSGasBalanceThresholdHasBeenReached,
+ np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached,
zap.Int64("balance", balance),
zap.Int64("threshold", np.gasBalanceThreshold))
@@ -72,8 +70,8 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
- zap.Error(err))
+ np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver,
+ zap.String("error", err.Error()))
return false
}
@@ -84,16 +82,16 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
}
// Process withdraw event by locking assets in the balance account.
-func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw)
+func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw)
return true
}
// create lock account
lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err))
+ np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err))
return false
}
@@ -107,9 +105,9 @@ func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.
prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount()))
prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime))
- err = np.balanceClient.Lock(ctx, prm)
+ err = np.balanceClient.Lock(prm)
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
+ np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
return false
}
@@ -118,9 +116,9 @@ func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.
// Process cheque event by transferring assets from the lock account back to
// the reserve account.
-func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque)
+func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque)
return true
}
@@ -130,9 +128,9 @@ func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheq
prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount()))
prm.SetID(cheque.ID())
- err := np.balanceClient.Burn(ctx, prm)
+ err := np.balanceClient.Burn(prm)
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
+ np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/frostfs/process_bind.go b/pkg/innerring/processors/frostfs/process_bind.go
new file mode 100644
index 000000000..50c6bf5f5
--- /dev/null
+++ b/pkg/innerring/processors/frostfs/process_bind.go
@@ -0,0 +1,109 @@
+package frostfs
+
+import (
+ "crypto/elliptic"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "go.uber.org/zap"
+)
+
+type bindCommon interface {
+ User() []byte
+ Keys() [][]byte
+ TxHash() util.Uint256
+}
+
+func (np *Processor) processBind(e bindCommon, bind bool) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreBind)
+ return true
+ }
+
+ c := &bindCommonContext{
+ bindCommon: e,
+ bind: bind,
+ }
+
+ err := np.checkBindCommon(c)
+ if err != nil {
+ np.log.Error(logs.FrostFSInvalidManageKeyEvent,
+ zap.Bool("bind", c.bind),
+ zap.String("error", err.Error()),
+ )
+
+ return false
+ }
+
+ return np.approveBindCommon(c) == nil
+}
+
+type bindCommonContext struct {
+ bindCommon
+
+ bind bool
+
+ scriptHash util.Uint160
+}
+
+func (np *Processor) checkBindCommon(e *bindCommonContext) error {
+ var err error
+
+ e.scriptHash, err = util.Uint160DecodeBytesBE(e.User())
+ if err != nil {
+ return err
+ }
+
+ curve := elliptic.P256()
+
+ for _, key := range e.Keys() {
+ _, err = keys.NewPublicKeyFromBytes(key, curve)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (np *Processor) approveBindCommon(e *bindCommonContext) error {
+ // calculate wallet address
+ scriptHash := e.User()
+
+ u160, err := util.Uint160DecodeBytesBE(scriptHash)
+ if err != nil {
+ np.log.Error(logs.FrostFSCouldNotDecodeScriptHashFromBytes,
+ zap.String("error", err.Error()),
+ )
+
+ return err
+ }
+
+ var id user.ID
+ id.SetScriptHash(u160)
+
+ prm := frostfsid.CommonBindPrm{}
+ prm.SetOwnerID(id.WalletBytes())
+ prm.SetKeys(e.Keys())
+ prm.SetHash(e.bindCommon.TxHash())
+
+ var typ string
+ if e.bind {
+ typ = "bind"
+ err = np.frostfsIDClient.AddKeys(prm)
+ } else {
+ typ = "unbind"
+ err = np.frostfsIDClient.RemoveKeys(prm)
+ }
+
+ if err != nil {
+ np.log.Error(fmt.Sprintf("could not approve %s", typ),
+ zap.String("error", err.Error()))
+ }
+
+ return err
+}
diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go
index dc579f6bb..2ae3e6ced 100644
--- a/pkg/innerring/processors/frostfs/process_config.go
+++ b/pkg/innerring/processors/frostfs/process_config.go
@@ -1,8 +1,6 @@
package frostfs
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -11,9 +9,9 @@ import (
// Process config event by setting configuration value from the mainchain in
// the sidechain.
-func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig)
+func (np *Processor) processConfig(config frostfsEvent.Config) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig)
return true
}
@@ -24,9 +22,9 @@ func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Conf
prm.SetValue(config.Value())
prm.SetHash(config.TxHash())
- err := np.netmapClient.SetConfig(ctx, prm)
+ err := np.netmapClient.SetConfig(prm)
if err != nil {
- np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
+ np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go
index 9d3bf65cd..20f44adcd 100644
--- a/pkg/innerring/processors/frostfs/processor.go
+++ b/pkg/innerring/processors/frostfs/processor.go
@@ -1,13 +1,14 @@
package frostfs
import (
- "context"
"errors"
"fmt"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -16,6 +17,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
)
type (
@@ -26,7 +28,7 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet(context.Context) bool
+ IsAlphabet() bool
}
// PrecisionConverter converts balance amount values.
@@ -35,13 +37,13 @@ type (
}
BalanceClient interface {
- Mint(ctx context.Context, p balance.MintPrm) error
- Lock(ctx context.Context, p balance.LockPrm) error
- Burn(ctx context.Context, p balance.BurnPrm) error
+ Mint(p balance.MintPrm) error
+ Lock(p balance.LockPrm) error
+ Burn(p balance.BurnPrm) error
}
NetmapClient interface {
- SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error
+ SetConfig(p nmClient.SetConfigPrm) error
}
MorphClient interface {
@@ -49,6 +51,11 @@ type (
TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
}
+ IDClient interface {
+ AddKeys(p frostfsid.CommonBindPrm) error
+ RemoveKeys(args frostfsid.CommonBindPrm) error
+ }
+
// Processor of events produced by frostfs contract in main net.
Processor struct {
log *logger.Logger
@@ -66,6 +73,7 @@ type (
mintEmitThreshold uint64
mintEmitValue fixedn.Fixed8
gasBalanceThreshold int64
+ frostfsIDClient IDClient
}
// Params of the processor constructor.
@@ -74,6 +82,7 @@ type (
Metrics metrics.Register
PoolSize int
FrostFSContract util.Uint160
+ FrostFSIDClient IDClient
BalanceClient BalanceClient
NetmapClient NetmapClient
MorphClient MorphClient
@@ -92,6 +101,8 @@ const (
withdrawNotification = "Withdraw"
chequeNotification = "Cheque"
configNotification = "SetConfig"
+ bindNotification = "Bind"
+ unbindNotification = "Unbind"
)
// New creates frostfs mainnet contract processor instance.
@@ -109,6 +120,8 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/frostfs: balance precision converter is not set")
}
+ p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))
+
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
@@ -139,37 +152,95 @@ func New(p *Params) (*Processor, error) {
mintEmitThreshold: p.MintEmitThreshold,
mintEmitValue: p.MintEmitValue,
gasBalanceThreshold: p.GasBalanceThreshold,
+
+ frostfsIDClient: p.FrostFSIDClient,
}, nil
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ var (
+ parsers = make([]event.NotificationParserInfo, 0, 6)
+
+ p event.NotificationParserInfo
+ )
+
+ p.SetScriptHash(np.frostfsContract)
+
+ // deposit event
+ p.SetType(event.TypeFromString(depositNotification))
+ p.SetParser(frostfsEvent.ParseDeposit)
+ parsers = append(parsers, p)
+
+ // withdraw event
+ p.SetType(event.TypeFromString(withdrawNotification))
+ p.SetParser(frostfsEvent.ParseWithdraw)
+ parsers = append(parsers, p)
+
+ // cheque event
+ p.SetType(event.TypeFromString(chequeNotification))
+ p.SetParser(frostfsEvent.ParseCheque)
+ parsers = append(parsers, p)
+
+ // config event
+ p.SetType(event.TypeFromString(configNotification))
+ p.SetParser(frostfsEvent.ParseConfig)
+ parsers = append(parsers, p)
+
+ // bind event
+ p.SetType(event.TypeFromString(bindNotification))
+ p.SetParser(frostfsEvent.ParseBind)
+ parsers = append(parsers, p)
+
+ // unbind event
+ p.SetType(event.TypeFromString(unbindNotification))
+ p.SetParser(frostfsEvent.ParseUnbind)
+ parsers = append(parsers, p)
+
+ return parsers
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- return []event.NotificationHandlerInfo{
- {
- Contract: np.frostfsContract,
- Type: event.TypeFromString(depositNotification),
- Parser: frostfsEvent.ParseDeposit,
- Handlers: []event.Handler{np.handleDeposit},
- },
- {
- Contract: np.frostfsContract,
- Type: event.TypeFromString(withdrawNotification),
- Parser: frostfsEvent.ParseWithdraw,
- Handlers: []event.Handler{np.handleWithdraw},
- },
- {
- Contract: np.frostfsContract,
- Type: event.TypeFromString(chequeNotification),
- Parser: frostfsEvent.ParseCheque,
- Handlers: []event.Handler{np.handleCheque},
- },
- {
- Contract: np.frostfsContract,
- Type: event.TypeFromString(configNotification),
- Parser: frostfsEvent.ParseConfig,
- Handlers: []event.Handler{np.handleConfig},
- },
- }
+ var (
+ handlers = make([]event.NotificationHandlerInfo, 0, 6)
+
+ h event.NotificationHandlerInfo
+ )
+
+ h.SetScriptHash(np.frostfsContract)
+
+ // deposit handler
+ h.SetType(event.TypeFromString(depositNotification))
+ h.SetHandler(np.handleDeposit)
+ handlers = append(handlers, h)
+
+ // withdraw handler
+ h.SetType(event.TypeFromString(withdrawNotification))
+ h.SetHandler(np.handleWithdraw)
+ handlers = append(handlers, h)
+
+ // cheque handler
+ h.SetType(event.TypeFromString(chequeNotification))
+ h.SetHandler(np.handleCheque)
+ handlers = append(handlers, h)
+
+ // config handler
+ h.SetType(event.TypeFromString(configNotification))
+ h.SetHandler(np.handleConfig)
+ handlers = append(handlers, h)
+
+ // bind handler
+ h.SetType(event.TypeFromString(bindNotification))
+ h.SetHandler(np.handleBind)
+ handlers = append(handlers, h)
+
+ // unbind handler
+ h.SetType(event.TypeFromString(unbindNotification))
+ h.SetHandler(np.handleUnbind)
+ handlers = append(handlers, h)
+
+ return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go
index 7e8ab629d..fd7f539c3 100644
--- a/pkg/innerring/processors/governance/handlers.go
+++ b/pkg/innerring/processors/governance/handlers.go
@@ -1,8 +1,6 @@
package governance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -13,7 +11,7 @@ import (
"go.uber.org/zap"
)
-func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) {
+func (gp *Processor) HandleAlphabetSync(e event.Event) {
var (
typ string
hash util.Uint256
@@ -34,16 +32,16 @@ func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) {
return
}
- gp.log.Info(ctx, logs.GovernanceNewEvent, zap.String("type", typ))
+ gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ))
// send event to the worker pool
err := processors.SubmitEvent(gp.pool, gp.metrics, "alphabet_sync", func() bool {
- return gp.processAlphabetSync(ctx, hash)
+ return gp.processAlphabetSync(hash)
})
if err != nil {
// there system can be moved into controlled degradation stage
- gp.log.Warn(ctx, logs.GovernanceGovernanceWorkerPoolDrained,
+ gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained,
zap.Int("capacity", gp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go
index 864c5da67..63d156dac 100644
--- a/pkg/innerring/processors/governance/handlers_test.go
+++ b/pkg/innerring/processors/governance/handlers_test.go
@@ -1,7 +1,6 @@
package governance
import (
- "context"
"encoding/binary"
"sort"
"testing"
@@ -9,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
+ nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@@ -38,10 +38,11 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
+ nm := &testNetmapClient{}
proc, err := New(
&Params{
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
EpochState: es,
AlphabetState: as,
Voter: v,
@@ -49,6 +50,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
+ NetmapClient: nm,
},
)
@@ -58,7 +60,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
txHash: util.Uint256{100},
}
- proc.HandleAlphabetSync(context.Background(), ev)
+ proc.HandleAlphabetSync(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -71,6 +73,10 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
+ var irUpdateExp []nmClient.UpdateIRPrm
+
+ require.EqualValues(t, irUpdateExp, nm.updates, "invalid IR updates")
+
var expAlphabetUpdate client.UpdateAlphabetListPrm
expAlphabetUpdate.SetHash(ev.txHash)
expAlphabetUpdate.SetList(testKeys.newInnerRingExp)
@@ -113,10 +119,11 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
+ nm := &testNetmapClient{}
proc, err := New(
&Params{
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
EpochState: es,
AlphabetState: as,
Voter: v,
@@ -124,6 +131,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
+ NetmapClient: nm,
},
)
@@ -134,7 +142,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
Role: noderoles.NeoFSAlphabet,
}
- proc.HandleAlphabetSync(context.Background(), ev)
+ proc.HandleAlphabetSync(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -147,6 +155,9 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
+ var irUpdatesExp []nmClient.UpdateIRPrm
+ require.EqualValues(t, irUpdatesExp, nm.updates, "invalid IR updates")
+
var alpabetUpdExp client.UpdateAlphabetListPrm
alpabetUpdExp.SetList(testKeys.newInnerRingExp)
alpabetUpdExp.SetHash(ev.TxHash)
@@ -179,7 +190,7 @@ func generateTestKeys(t *testing.T) testKeys {
for {
var result testKeys
- for range 4 {
+ for i := 0; i < 4; i++ {
pk, err := keys.NewPrivateKey()
require.NoError(t, err, "failed to create private key")
result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey())
@@ -194,7 +205,7 @@ func generateTestKeys(t *testing.T) testKeys {
require.NoError(t, err, "failed to create expected new alphabet")
if len(result.newAlphabetExp) == 0 {
- continue // can be happen because of random and sort
+ continue //can be happen because of random and sort
}
var irKeys keys.PublicKeys
@@ -219,7 +230,7 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+func (s *testAlphabetState) IsAlphabet() bool {
return s.isAlphabet
}
@@ -227,7 +238,7 @@ type testVoter struct {
votes []VoteValidatorPrm
}
-func (v *testVoter) VoteForSidechainValidator(_ context.Context, prm VoteValidatorPrm) error {
+func (v *testVoter) VoteForSidechainValidator(prm VoteValidatorPrm) error {
v.votes = append(v.votes, prm)
return nil
}
@@ -236,7 +247,7 @@ type testIRFetcher struct {
publicKeys keys.PublicKeys
}
-func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
+func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
return f.publicKeys, nil
}
@@ -251,12 +262,12 @@ func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) {
return c.commiteeKeys, nil
}
-func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error {
+func (c *testMorphClient) UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error {
c.alphabetUpdates = append(c.alphabetUpdates, prm)
return nil
}
-func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error {
+func (c *testMorphClient) UpdateNotaryList(prm client.UpdateNotaryListPrm) error {
c.notaryUpdates = append(c.notaryUpdates, prm)
return nil
}
@@ -266,7 +277,7 @@ type testMainnetClient struct {
designateHash util.Uint160
}
-func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) {
+func (c *testMainnetClient) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
return c.alphabetKeys, nil
}
@@ -278,7 +289,16 @@ type testFrostFSClient struct {
updates []frostfscontract.AlphabetUpdatePrm
}
-func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error {
+func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error {
+ c.updates = append(c.updates, p)
+ return nil
+}
+
+type testNetmapClient struct {
+ updates []nmClient.UpdateIRPrm
+}
+
+func (c *testNetmapClient) UpdateInnerRing(p nmClient.UpdateIRPrm) error {
c.updates = append(c.updates, p)
return nil
}
diff --git a/pkg/innerring/processors/governance/list_test.go b/pkg/innerring/processors/governance/list_test.go
index 4ecebf05b..d099ec837 100644
--- a/pkg/innerring/processors/governance/list_test.go
+++ b/pkg/innerring/processors/governance/list_test.go
@@ -49,7 +49,7 @@ func TestNewAlphabetList(t *testing.T) {
}
ln := len(rounds)
- for i := range ln {
+ for i := 0; i < ln; i++ {
list, err = newAlphabetList(list, exp)
require.NoError(t, err)
require.True(t, equalPublicKeyLists(list, rounds[i]))
@@ -131,7 +131,7 @@ func TestUpdateInnerRing(t *testing.T) {
func generateKeys(n int) (keys.PublicKeys, error) {
pubKeys := make(keys.PublicKeys, 0, n)
- for range n {
+ for i := 0; i < n; i++ {
privKey, err := keys.NewPrivateKey()
if err != nil {
return nil, err
diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go
index 6e22abb3c..50ba58e77 100644
--- a/pkg/innerring/processors/governance/process_update.go
+++ b/pkg/innerring/processors/governance/process_update.go
@@ -1,7 +1,6 @@
package governance
import (
- "context"
"encoding/binary"
"encoding/hex"
"sort"
@@ -19,39 +18,39 @@ const (
alphabetUpdateIDPrefix = "AlphabetUpdate"
)
-func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool {
- if !gp.alphabetState.IsAlphabet(ctx) {
- gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
+func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
+ if !gp.alphabetState.IsAlphabet() {
+ gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
return true
}
- mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList(ctx)
+ mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet,
+ zap.String("error", err.Error()))
return false
}
sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain,
+ zap.String("error", err.Error()))
return false
}
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
+ zap.String("error", err.Error()))
return false
}
if newAlphabet == nil {
- gp.log.Info(ctx, logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
+ gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
return true
}
- gp.log.Info(ctx, logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
+ gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
zap.String("new_alphabet", prettyKeys(newAlphabet)),
)
@@ -62,22 +61,22 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
}
// 1. Vote to sidechain committee via alphabet contracts.
- err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
+ err = gp.voter.VoteForSidechainValidator(votePrm)
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee,
+ zap.String("error", err.Error()))
}
// 2. Update NeoFSAlphabet role in the sidechain.
- gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash)
+ gp.updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet, txHash)
// 3. Update notary role in the sidechain.
- gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash)
+ gp.updateNotaryRoleInSidechain(newAlphabet, txHash)
// 4. Update FrostFS contract in the mainnet.
- gp.updateFrostFSContractInMainnet(ctx, newAlphabet)
+ gp.updateFrostFSContractInMainnet(newAlphabet)
- gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate)
+ gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate)
return true
}
@@ -94,24 +93,24 @@ func prettyKeys(keys keys.PublicKeys) string {
return strings.TrimRight(sb.String(), delimiter)
}
-func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
- innerRing, err := gp.irFetcher.InnerRingKeys(ctx)
+func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
+ innerRing, err := gp.irFetcher.InnerRingKeys()
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain,
+ zap.String("error", err.Error()))
return
}
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
+ zap.String("error", err.Error()))
return
}
sort.Sort(newInnerRing)
- gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList,
+ gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList,
zap.String("before", prettyKeys(innerRing)),
zap.String("after", prettyKeys(newInnerRing)),
)
@@ -120,26 +119,26 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
updPrm.SetList(newInnerRing)
updPrm.SetHash(txHash)
- if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
- gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
- zap.Error(err))
+ if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil {
+ gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
+ zap.String("error", err.Error()))
}
}
-func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) {
+func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, txHash util.Uint256) {
updPrm := client.UpdateNotaryListPrm{}
updPrm.SetList(newAlphabet)
updPrm.SetHash(txHash)
- err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
+ err := gp.morphClient.UpdateNotaryList(updPrm)
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
+ zap.String("error", err.Error()))
}
}
-func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) {
+func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) {
epoch := gp.epochState.EpochCounter()
buf := make([]byte, 8)
@@ -152,9 +151,9 @@ func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlph
prm.SetID(id)
prm.SetPubs(newAlphabet)
- err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
+ err := gp.frostfsClient.AlphabetUpdate(prm)
if err != nil {
- gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
- zap.Error(err))
+ gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
+ zap.String("error", err.Error()))
}
}
diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go
index 2d131edda..fa267eade 100644
--- a/pkg/innerring/processors/governance/processor.go
+++ b/pkg/innerring/processors/governance/processor.go
@@ -1,7 +1,6 @@
package governance
import (
- "context"
"errors"
"fmt"
@@ -26,7 +25,7 @@ const ProcessorPoolSize = 1
type (
// AlphabetState is a callback interface for innerring global state.
AlphabetState interface {
- IsAlphabet(context.Context) bool
+ IsAlphabet() bool
}
)
@@ -39,7 +38,7 @@ type VoteValidatorPrm struct {
// Voter is a callback interface for alphabet contract voting.
type Voter interface {
- VoteForSidechainValidator(context.Context, VoteValidatorPrm) error
+ VoteForSidechainValidator(VoteValidatorPrm) error
}
type (
@@ -52,11 +51,11 @@ type (
// Implementation must take into account availability of
// the notary contract.
IRFetcher interface {
- InnerRingKeys(ctx context.Context) (keys.PublicKeys, error)
+ InnerRingKeys() (keys.PublicKeys, error)
}
FrostFSClient interface {
- AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error
+ AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error
}
NetmapClient interface {
@@ -64,14 +63,14 @@ type (
}
MainnetClient interface {
- NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error)
+ NeoFSAlphabetList() (res keys.PublicKeys, err error)
GetDesignateHash() util.Uint160
}
MorphClient interface {
Committee() (res keys.PublicKeys, err error)
- UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error
- UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error
+ UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error
+ UpdateNotaryList(prm client.UpdateNotaryListPrm) error
}
// Processor of events related to governance in the network.
@@ -80,6 +79,7 @@ type (
metrics metrics.Register
pool *ants.Pool
frostfsClient FrostFSClient
+ netmapClient NetmapClient
alphabetState AlphabetState
epochState EpochState
@@ -105,6 +105,7 @@ type (
MorphClient MorphClient
MainnetClient MainnetClient
FrostFSClient FrostFSClient
+ NetmapClient NetmapClient
}
)
@@ -145,6 +146,7 @@ func New(p *Params) (*Processor, error) {
metrics: metricsRegister,
pool: pool,
frostfsClient: p.FrostFSClient,
+ netmapClient: p.NetmapClient,
alphabetState: p.AlphabetState,
epochState: p.EpochState,
voter: p.Voter,
@@ -155,16 +157,22 @@ func New(p *Params) (*Processor, error) {
}, nil
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (gp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ var pi event.NotificationParserInfo
+ pi.SetScriptHash(gp.designate)
+ pi.SetType(event.TypeFromString(native.DesignationEventName))
+ pi.SetParser(rolemanagement.ParseDesignate)
+ return []event.NotificationParserInfo{pi}
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- return []event.NotificationHandlerInfo{
- {
- Contract: gp.designate,
- Type: event.TypeFromString(native.DesignationEventName),
- Parser: rolemanagement.ParseDesignate,
- Handlers: []event.Handler{gp.HandleAlphabetSync},
- },
- }
+ var hi event.NotificationHandlerInfo
+ hi.SetScriptHash(gp.designate)
+ hi.SetType(event.TypeFromString(native.DesignationEventName))
+ hi.SetHandler(gp.HandleAlphabetSync)
+ return []event.NotificationHandlerInfo{hi}
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
diff --git a/pkg/innerring/processors/netmap/cleanup_table.go b/pkg/innerring/processors/netmap/cleanup_table.go
index abd5b089a..80117247d 100644
--- a/pkg/innerring/processors/netmap/cleanup_table.go
+++ b/pkg/innerring/processors/netmap/cleanup_table.go
@@ -24,8 +24,6 @@ type (
epochStamp
binNodeInfo []byte
-
- maintenance bool
}
)
@@ -60,7 +58,6 @@ func (c *cleanupTable) update(snapshot netmap.NetMap, now uint64) {
}
access.binNodeInfo = binNodeInfo
- access.maintenance = nmNodes[i].Status().IsMaintenance()
newMap[keyString] = access
}
@@ -82,7 +79,10 @@ func (c *cleanupTable) touch(keyString string, now uint64, binNodeInfo []byte) b
result := !ok || access.removeFlag || !bytes.Equal(access.binNodeInfo, binNodeInfo)
access.removeFlag = false // reset remove flag on each touch
- access.epoch = max(access.epoch, now)
+ if now > access.epoch {
+ access.epoch = now
+ }
+
access.binNodeInfo = binNodeInfo // update binary node info
c.lastAccess[keyString] = access
@@ -105,7 +105,7 @@ func (c *cleanupTable) forEachRemoveCandidate(epoch uint64, f func(string) error
defer c.Unlock()
for keyString, access := range c.lastAccess {
- if !access.maintenance && epoch-access.epoch > c.threshold {
+ if epoch-access.epoch > c.threshold {
access.removeFlag = true // set remove flag
c.lastAccess[keyString] = access
diff --git a/pkg/innerring/processors/netmap/cleanup_table_test.go b/pkg/innerring/processors/netmap/cleanup_table_test.go
index 208bd5496..959710393 100644
--- a/pkg/innerring/processors/netmap/cleanup_table_test.go
+++ b/pkg/innerring/processors/netmap/cleanup_table_test.go
@@ -124,21 +124,6 @@ func TestCleanupTable(t *testing.T) {
}))
require.EqualValues(t, len(infos)-1, cnt)
})
-
- t.Run("skip maintenance nodes", func(t *testing.T) {
- cnt := 0
- infos[1].SetStatus(netmap.Maintenance)
- key := netmap.StringifyPublicKey(infos[1])
- c.update(networkMap, 5)
-
- require.NoError(t,
- c.forEachRemoveCandidate(5, func(s string) error {
- cnt++
- require.NotEqual(t, s, key)
- return nil
- }))
- require.EqualValues(t, len(infos)-1, cnt)
- })
})
}
diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go
index 4c7199a49..c6053e281 100644
--- a/pkg/innerring/processors/netmap/handlers.go
+++ b/pkg/innerring/processors/netmap/handlers.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -12,93 +11,93 @@ import (
"go.uber.org/zap"
)
-func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) {
+func (np *Processor) HandleNewEpochTick(ev event.Event) {
_ = ev.(timerEvent.NewEpochTick)
- np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch"))
+ np.log.Info(logs.NetmapTick, zap.String("type", "epoch"))
// send an event to the worker pool
- err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) })
+ err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick)
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) {
+func (np *Processor) handleNewEpoch(ev event.Event) {
epochEvent := ev.(netmapEvent.NewEpoch)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "new epoch"),
zap.Uint64("value", epochEvent.EpochNumber()))
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool {
- return np.processNewEpoch(ctx, epochEvent)
+ return np.processNewEpoch(epochEvent)
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) {
+func (np *Processor) handleAddPeer(ev event.Event) {
newPeer := ev.(netmapEvent.AddPeer)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "add peer"),
)
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool {
- return np.processAddPeer(ctx, newPeer)
+ return np.processAddPeer(newPeer)
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) {
+func (np *Processor) handleUpdateState(ev event.Event) {
updPeer := ev.(netmapEvent.UpdatePeer)
- np.log.Info(ctx, logs.Notification,
+ np.log.Info(logs.Notification,
zap.String("type", "update peer state"),
zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool {
- return np.processUpdatePeer(ctx, updPeer)
+ return np.processUpdatePeer(updPeer)
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
-func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) {
+func (np *Processor) handleCleanupTick(ev event.Event) {
if !np.netmapSnapshot.enabled {
- np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518)
+ np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518)
return
}
cleanup := ev.(netmapCleanupTick)
- np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner"))
+ np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner"))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
- return np.processNetmapCleanupTick(ctx, cleanup)
+ return np.processNetmapCleanupTick(cleanup)
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go
index 934c3790d..6c9e265cc 100644
--- a/pkg/innerring/processors/netmap/handlers_test.go
+++ b/pkg/innerring/processors/netmap/handlers_test.go
@@ -1,19 +1,19 @@
package netmap
import (
- "context"
"fmt"
"testing"
"time"
+ v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
netmapContract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -39,7 +39,7 @@ func TestNewEpochTick(t *testing.T) {
require.NoError(t, err, "failed to create processor")
ev := timerEvent.NewEpochTick{}
- proc.HandleNewEpochTick(context.Background(), ev)
+ proc.HandleNewEpochTick(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -68,6 +68,7 @@ func TestNewEpoch(t *testing.T) {
duration: 10,
}
r := &testEpochResetter{}
+ cc := &testContainerClient{}
nc := &testNetmapClient{
epochDuration: 20,
txHeights: map[util.Uint256]uint32{
@@ -81,6 +82,7 @@ func TestNewEpoch(t *testing.T) {
p.NotaryDepositHandler = eh.Handle
p.AlphabetSyncHandler = eh.Handle
p.NetmapClient = nc
+ p.ContainerWrapper = cc
p.EpochTimer = r
p.EpochState = es
})
@@ -91,7 +93,7 @@ func TestNewEpoch(t *testing.T) {
Num: 101,
Hash: util.Uint256{101},
}
- proc.handleNewEpoch(context.Background(), ev)
+ proc.handleNewEpoch(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -101,6 +103,11 @@ func TestNewEpoch(t *testing.T) {
require.Equal(t, ev.Num, es.counter, "invalid epoch counter")
require.EqualValues(t, []uint32{nc.txHeights[ev.Hash]}, r.timers, "invalid epoch timer resets")
+ var expEstimation cntClient.StartEstimationPrm
+ expEstimation.SetEpoch(ev.Num - 1)
+ expEstimation.SetHash(ev.Hash)
+ require.EqualValues(t, []cntClient.StartEstimationPrm{expEstimation}, cc.estimations, "invalid estimations")
+
require.EqualValues(t, []event.Event{
governance.NewSyncEvent(ev.TxHash()),
ev,
@@ -131,22 +138,7 @@ func TestAddPeer(t *testing.T) {
MainTransaction: &transaction.Transaction{},
},
}
- proc.handleAddPeer(context.Background(), ev)
-
- for proc.pool.Running() > 0 {
- time.Sleep(10 * time.Millisecond)
- }
-
- require.Nil(t, nc.notaryInvokes, "invalid notary invokes")
-
- node.SetStatus(netmap.Online)
- ev = netmapEvent.AddPeer{
- NodeBytes: node.Marshal(),
- Request: &payload.P2PNotaryRequest{
- MainTransaction: &transaction.Transaction{},
- },
- }
- proc.handleAddPeer(context.Background(), ev)
+ proc.handleAddPeer(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -189,7 +181,7 @@ func TestUpdateState(t *testing.T) {
MainTransaction: &transaction.Transaction{},
},
}
- proc.handleUpdateState(context.Background(), ev)
+ proc.handleUpdateState(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -233,7 +225,7 @@ func TestCleanupTick(t *testing.T) {
txHash: util.Uint256{123},
}
- proc.handleCleanupTick(context.Background(), ev)
+ proc.handleCleanupTick(ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@@ -267,11 +259,12 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
as := &testAlphabetState{
isAlphabet: true,
}
+ cc := &testContainerClient{}
nc := &testNetmapClient{}
eh := &testEventHandler{}
p := &Params{
- Log: test.NewLogger(t),
+ Log: test.NewLogger(t, true),
PoolSize: 1,
CleanupEnabled: false,
CleanupThreshold: 3,
@@ -280,6 +273,7 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
EpochState: es,
EpochTimer: r,
AlphabetState: as,
+ ContainerWrapper: cc,
NetmapClient: nc,
NotaryDepositHandler: eh.Handle,
AlphabetSyncHandler: eh.Handle,
@@ -294,7 +288,7 @@ type testNodeStateSettings struct {
maintAllowed bool
}
-func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error {
+func (s *testNodeStateSettings) MaintenanceModeAllowed() error {
if s.maintAllowed {
return nil
}
@@ -303,7 +297,7 @@ func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error {
type testValidator struct{}
-func (v *testValidator) VerifyAndUpdate(context.Context, *netmap.NodeInfo) error {
+func (v *testValidator) VerifyAndUpdate(*netmap.NodeInfo) error {
return nil
}
@@ -315,15 +309,12 @@ type testEpochState struct {
func (s *testEpochState) SetEpochCounter(c uint64) {
s.counter = c
}
-
func (s *testEpochState) EpochCounter() uint64 {
return s.counter
}
-
func (s *testEpochState) SetEpochDuration(d uint64) {
s.duration = d
}
-
func (s *testEpochState) EpochDuration() uint64 {
return s.duration
}
@@ -341,10 +332,19 @@ type testAlphabetState struct {
isAlphabet bool
}
-func (s *testAlphabetState) IsAlphabet(context.Context) bool {
+func (s *testAlphabetState) IsAlphabet() bool {
return s.isAlphabet
}
+type testContainerClient struct {
+ estimations []cntClient.StartEstimationPrm
+}
+
+func (c *testContainerClient) StartEstimation(p cntClient.StartEstimationPrm) error {
+ c.estimations = append(c.estimations, p)
+ return nil
+}
+
type notaryInvoke struct {
contract util.Uint160
fee fixedn.Fixed8
@@ -365,7 +365,7 @@ type testNetmapClient struct {
invokedTxs []*transaction.Transaction
}
-func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
+func (c *testNetmapClient) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{
contract: contract,
fee: fee,
@@ -381,7 +381,7 @@ func (c *testNetmapClient) ContractAddress() util.Uint160 {
return c.contractAddress
}
-func (c *testNetmapClient) EpochDuration(context.Context) (uint64, error) {
+func (c *testNetmapClient) EpochDuration() (uint64, error) {
return c.epochDuration, nil
}
@@ -392,11 +392,11 @@ func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) {
return 0, fmt.Errorf("not found")
}
-func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) {
+func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
return c.netmap, nil
}
-func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error {
+func (c *testNetmapClient) NewEpoch(epoch uint64, force bool) error {
c.newEpochs = append(c.newEpochs, epoch)
return nil
}
@@ -414,6 +414,6 @@ type testEventHandler struct {
handledEvents []event.Event
}
-func (h *testEventHandler) Handle(_ context.Context, e event.Event) {
+func (h *testEventHandler) Handle(e event.Event) {
h.handledEvents = append(h.handledEvents, e)
}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
index b81dc9989..d071a7792 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
@@ -1,11 +1,10 @@
package locode
import (
- "context"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -30,7 +29,7 @@ var errMissingRequiredAttr = errors.New("missing required attribute in DB record
// - Continent: R.Continent().String().
//
// UN-LOCODE attribute remains untouched.
-func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
+func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
attrLocode := n.LOCODE()
if attrLocode == "" {
return nil
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
index fa2dd1ac1..6697391e8 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
@@ -1,14 +1,13 @@
package locode_test
import (
- "context"
"errors"
"fmt"
"testing"
- locodestd "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/locode"
+ locodestd "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/stretchr/testify/require"
)
@@ -93,7 +92,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
t.Run("w/o locode", func(t *testing.T) {
n := nodeInfoWithSomeAttrs()
- err := validator.VerifyAndUpdate(context.Background(), n)
+ err := validator.VerifyAndUpdate(n)
require.NoError(t, err)
})
@@ -103,7 +102,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttrValue(n, "WRONG LOCODE")
- err := validator.VerifyAndUpdate(context.Background(), n)
+ err := validator.VerifyAndUpdate(n)
require.Error(t, err)
})
@@ -112,7 +111,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttr(n, locodestd.LOCODE{"RU", "SPB"})
- err := validator.VerifyAndUpdate(context.Background(), n)
+ err := validator.VerifyAndUpdate(n)
require.Error(t, err)
})
@@ -120,7 +119,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
addLocodeAttr(n, r.LOCODE)
- err := validator.VerifyAndUpdate(context.Background(), n)
+ err := validator.VerifyAndUpdate(n)
require.NoError(t, err)
require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode"))
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
index ba5db9205..e6332261e 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
@@ -1,45 +1,45 @@
package locode
import (
- "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
)
// Record is an interface of read-only
// FrostFS LOCODE database single entry.
type Record interface {
- // CountryCode must return ISO 3166-1 alpha-2
+ // Must return ISO 3166-1 alpha-2
// country code.
//
// Must not return nil.
CountryCode() *locodedb.CountryCode
- // CountryName must return English short country name
+ // Must return English short country name
// officially used by the ISO 3166
// Maintenance Agency (ISO 3166/MA).
CountryName() string
- // LocationCode must return UN/LOCODE 3-character code
+ // Must return UN/LOCODE 3-character code
// for the location (numerals 2-9 may also
// be used).
//
// Must not return nil.
LocationCode() *locodedb.LocationCode
- // LocationName must return name of the location which
+ // Must return name of the location which
// have been allocated a UN/LOCODE without
// diacritic sign.
LocationName() string
- // SubDivCode Must return ISO 1-3 character alphabetic
+ // Must return ISO 1-3 character alphabetic
// and/or numeric code for the administrative
// division of the country concerned.
SubDivCode() string
- // SubDivName must return subdivision name.
+ // Must return subdivision name.
SubDivName() string
- // Continent must return existing continent where is
+ // Must return existing continent where is
// the location.
//
// Must not return nil.
@@ -49,7 +49,7 @@ type Record interface {
// DB is an interface of read-only
// FrostFS LOCODE database.
type DB interface {
- // Get must find the record that corresponds to
+ // Must find the record that corresponds to
// LOCODE and provides the Record interface.
//
// Must return an error if Record is nil.
diff --git a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
index 0e4628ac7..126f36582 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go
@@ -1,7 +1,6 @@
package maddress
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -9,7 +8,7 @@ import (
)
// VerifyAndUpdate calls network.VerifyAddress.
-func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
+func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
err := network.VerifyMultiAddress(*n)
if err != nil {
return fmt.Errorf("could not verify multiaddress: %w", err)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
index 03c41a451..4094e50a5 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
@@ -7,7 +7,6 @@ map candidates.
package state
import (
- "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -24,7 +23,7 @@ type NetworkSettings interface {
// no error if allowed;
// ErrMaintenanceModeDisallowed if disallowed;
// other error if there are any problems with the check.
- MaintenanceModeAllowed(ctx context.Context) error
+ MaintenanceModeAllowed() error
}
// NetMapCandidateValidator represents tool which checks state of nodes which
@@ -56,13 +55,13 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting
// MUST NOT be called before SetNetworkSettings.
//
// See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods.
-func (x *NetMapCandidateValidator) VerifyAndUpdate(ctx context.Context, node *netmap.NodeInfo) error {
- if node.Status().IsOnline() {
+func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error {
+ if node.IsOnline() {
return nil
}
- if node.Status().IsMaintenance() {
- return x.netSettings.MaintenanceModeAllowed(ctx)
+ if node.IsMaintenance() {
+ return x.netSettings.MaintenanceModeAllowed()
}
return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE")
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
index cbf48a710..a557628f0 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
@@ -1,7 +1,6 @@
package state_test
import (
- "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@@ -14,7 +13,7 @@ type testNetworkSettings struct {
disallowed bool
}
-func (x testNetworkSettings) MaintenanceModeAllowed(context.Context) error {
+func (x testNetworkSettings) MaintenanceModeAllowed() error {
if x.disallowed {
return state.ErrMaintenanceModeDisallowed
}
@@ -42,22 +41,22 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
},
{
name: "ONLINE",
- preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) },
+ preparer: (*netmap.NodeInfo).SetOnline,
valid: true,
},
{
name: "OFFLINE",
- preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Offline) },
+ preparer: (*netmap.NodeInfo).SetOffline,
valid: false,
},
{
name: "MAINTENANCE/allowed",
- preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
+ preparer: (*netmap.NodeInfo).SetMaintenance,
valid: true,
},
{
name: "MAINTENANCE/disallowed",
- preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
+ preparer: (*netmap.NodeInfo).SetMaintenance,
valid: false,
validatorPreparer: func(v *state.NetMapCandidateValidator) {
var s testNetworkSettings
@@ -82,7 +81,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
testCase.validatorPreparer(&v)
}
- err := v.VerifyAndUpdate(context.Background(), &node)
+ err := v.VerifyAndUpdate(&node)
if testCase.valid {
require.NoError(t, err, testCase.name)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/validator.go b/pkg/innerring/processors/netmap/nodevalidation/validator.go
index 3dbe98a8d..e9b24e024 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/validator.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/validator.go
@@ -1,8 +1,6 @@
package nodevalidation
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -28,9 +26,9 @@ func New(validators ...netmap.NodeValidator) *CompositeValidator {
// VerifyAndUpdate passes apinetmap.NodeInfo to wrapped validators.
//
// If error appears, returns it immediately.
-func (c *CompositeValidator) VerifyAndUpdate(ctx context.Context, ni *apinetmap.NodeInfo) error {
+func (c *CompositeValidator) VerifyAndUpdate(ni *apinetmap.NodeInfo) error {
for _, v := range c.validators {
- if err := v.VerifyAndUpdate(ctx, ni); err != nil {
+ if err := v.VerifyAndUpdate(ni); err != nil {
return err
}
}
diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go
index 8f8cc17ff..170c39e2c 100644
--- a/pkg/innerring/processors/netmap/process_cleanup.go
+++ b/pkg/innerring/processors/netmap/process_cleanup.go
@@ -1,17 +1,15 @@
package netmap
import (
- "context"
-
+ v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)
-func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
+func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
return true
}
@@ -19,13 +17,13 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
key, err := keys.NewPublicKeyFromString(s)
if err != nil {
- np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode,
+ np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode,
zap.String("key", s))
return nil
}
- np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
+ np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
// In notary environments we call UpdateStateIR method instead of UpdateState.
// It differs from UpdateState only by name, so we can do this in the same form.
@@ -33,7 +31,6 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
const methodUpdateStateNotary = "updateStateIR"
err = np.netmapClient.MorphNotaryInvoke(
- ctx,
np.netmapClient.ContractAddress(),
0,
uint32(ev.epoch),
@@ -42,14 +39,14 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
int64(v2netmap.Offline), key.Bytes(),
)
if err != nil {
- np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
}
return nil
})
if err != nil {
- np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
- zap.Error(err))
+ np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache,
+ zap.String("error", err.Error()))
return false
}
diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go
index 7c78d24a5..01bfbae67 100644
--- a/pkg/innerring/processors/netmap/process_epoch.go
+++ b/pkg/innerring/processors/netmap/process_epoch.go
@@ -1,23 +1,22 @@
package netmap
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"go.uber.org/zap"
)
// Process new epoch notification by setting global epoch value and resetting
// local epoch timer.
-func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool {
+func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
epoch := ev.EpochNumber()
- epochDuration, err := np.netmapClient.EpochDuration(ctx)
+ epochDuration, err := np.netmapClient.EpochDuration()
if err != nil {
- np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
- zap.Error(err))
+ np.log.Warn(logs.NetmapCantGetEpochDuration,
+ zap.String("error", err.Error()))
} else {
np.epochState.SetEpochDuration(epochDuration)
}
@@ -26,46 +25,61 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
h, err := np.netmapClient.MorphTxHeight(ev.TxHash())
if err != nil {
- np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
+ np.log.Warn(logs.NetmapCantGetTransactionHeight,
zap.String("hash", ev.TxHash().StringLE()),
- zap.Error(err))
+ zap.String("error", err.Error()))
}
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
- np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
- zap.Error(err))
+ np.log.Warn(logs.NetmapCantResetEpochTimer,
+ zap.String("error", err.Error()))
}
// get new netmap snapshot
- networkMap, err := np.netmapClient.NetMap(ctx)
+ networkMap, err := np.netmapClient.NetMap()
if err != nil {
- np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
- zap.Error(err))
+ np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
+ zap.String("error", err.Error()))
return false
}
+ prm := cntClient.StartEstimationPrm{}
+
+ prm.SetEpoch(epoch - 1)
+ prm.SetHash(ev.TxHash())
+
+ if epoch > 0 && np.alphabetState.IsAlphabet() { // estimates are invalid in genesis epoch
+ err = np.containerWrp.StartEstimation(prm)
+
+ if err != nil {
+ np.log.Warn(logs.NetmapCantStartContainerSizeEstimation,
+ zap.Uint64("epoch", epoch),
+ zap.String("error", err.Error()))
+ }
+ }
+
np.netmapSnapshot.update(*networkMap, epoch)
- np.handleCleanupTick(ctx, netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
- np.handleAlphabetSync(ctx, governance.NewSyncEvent(ev.TxHash()))
- np.handleNotaryDeposit(ctx, ev)
+ np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
+ np.handleAlphabetSync(governance.NewSyncEvent(ev.TxHash()))
+ np.handleNotaryDeposit(ev)
return true
}
// Process new epoch tick by invoking new epoch method in network map contract.
-func (np *Processor) processNewEpochTick(ctx context.Context) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
+func (np *Processor) processNewEpochTick() bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
return true
}
nextEpoch := np.epochState.EpochCounter() + 1
- np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
+ np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
- err := np.netmapClient.NewEpoch(ctx, nextEpoch)
+ err := np.netmapClient.NewEpoch(nextEpoch, false)
if err != nil {
- np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
index b5c727cc7..96b8c8e97 100644
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ b/pkg/innerring/processors/netmap/process_peers.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -13,9 +12,9 @@ import (
// Process add peer notification by sanity check of new node
// local epoch timer.
-func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
+func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
return true
}
@@ -23,7 +22,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
tx := ev.NotaryRequest().MainTransaction
ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers)
if err != nil || !ok {
- np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction,
+ np.log.Warn(logs.NetmapNonhaltNotaryTransaction,
zap.String("method", "netmap.AddPeer"),
zap.String("hash", tx.Hash().StringLE()),
zap.Error(err))
@@ -34,15 +33,15 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
var nodeInfo netmap.NodeInfo
if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
// it will be nice to have tx id at event structure to log it
- np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate)
+ np.log.Warn(logs.NetmapCantParseNetworkMapCandidate)
return false
}
// validate and update node info
- err = np.nodeValidator.VerifyAndUpdate(ctx, &nodeInfo)
+ err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
if err != nil {
- np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
- zap.Error(err),
+ np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
+ zap.String("error", err.Error()),
)
return false
@@ -58,13 +57,8 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
updated := np.netmapSnapshot.touch(keyString, np.epochState.EpochCounter(), nodeInfoBinary)
- // `processAddPeer` reacts on `AddPeer` notification, `processNewEpoch` - on `NewEpoch`.
- // This two notification produces in order - `NewEpoch` -> `AddPeer`.
- // But there is no guarantee that code will be executed in the same order.
- // That is why we need to perform `addPeerIR` only in case when node is online,
- // because in scope of this method, contract set state `ONLINE` for the node.
- if updated && nodeInfo.Status().IsOnline() {
- np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate,
+ if updated {
+ np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
zap.String("key", keyString))
prm := netmapclient.AddPeerPrm{}
@@ -77,7 +71,6 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
// create new notary request with the original nonce
err = np.netmapClient.MorphNotaryInvoke(
- ctx,
np.netmapClient.ContractAddress(),
0,
ev.NotaryRequest().MainTransaction.Nonce,
@@ -85,8 +78,9 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
methodAddPeerNotary,
nodeInfoBinary,
)
+
if err != nil {
- np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
return false
}
}
@@ -95,9 +89,9 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
}
// Process update peer notification by sending approval tx to the smart contract.
-func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool {
- if !np.alphabetState.IsAlphabet(ctx) {
- np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
+func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
+ if !np.alphabetState.IsAlphabet() {
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
return true
}
@@ -108,9 +102,9 @@ func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.Updat
var err error
if ev.Maintenance() {
- err = np.nodeStateSettings.MaintenanceModeAllowed(ctx)
+ err = np.nodeStateSettings.MaintenanceModeAllowed()
if err != nil {
- np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState,
+ np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState,
zap.Error(err),
)
@@ -119,7 +113,7 @@ func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.Updat
}
if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil {
- np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
return false
}
diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go
index 277bca1c3..6b8a24a62 100644
--- a/pkg/innerring/processors/netmap/processor.go
+++ b/pkg/innerring/processors/netmap/processor.go
@@ -1,12 +1,13 @@
package netmap
import (
- "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
+ cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -16,6 +17,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
+ "go.uber.org/zap"
)
type (
@@ -34,14 +36,14 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
- IsAlphabet(context.Context) bool
+ IsAlphabet() bool
}
// NodeValidator wraps basic method of checking the correctness
// of information about the node and its finalization for adding
// to the network map.
NodeValidator interface {
- // VerifyAndUpdate must verify and optionally update NodeInfo structure.
+ // Must verify and optionally update NodeInfo structure.
//
// Must return an error if NodeInfo input is invalid.
// Must return an error if it is not possible to correctly
@@ -49,20 +51,24 @@ type (
//
// If no error occurs, the parameter must point to the
// ready-made NodeInfo structure.
- VerifyAndUpdate(context.Context, *netmap.NodeInfo) error
+ VerifyAndUpdate(*netmap.NodeInfo) error
}
Client interface {
- MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
+ MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
ContractAddress() util.Uint160
- EpochDuration(ctx context.Context) (uint64, error)
+ EpochDuration() (uint64, error)
MorphTxHeight(h util.Uint256) (res uint32, err error)
- NetMap(ctx context.Context) (*netmap.NetMap, error)
- NewEpoch(ctx context.Context, epoch uint64) error
+ NetMap() (*netmap.NetMap, error)
+ NewEpoch(epoch uint64, force bool) error
MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error)
MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error
}
+ ContainerClient interface {
+ StartEstimation(p cntClient.StartEstimationPrm) error
+ }
+
// Processor of events produced by network map contract
// and new epoch ticker, because it is related to contract.
Processor struct {
@@ -74,6 +80,7 @@ type (
alphabetState AlphabetState
netmapClient Client
+ containerWrp ContainerClient
netmapSnapshot cleanupTable
@@ -96,6 +103,7 @@ type (
AlphabetState AlphabetState
CleanupEnabled bool
CleanupThreshold uint64 // in epochs
+ ContainerWrapper ContainerClient
AlphabetSyncHandler event.Handler
NotaryDepositHandler event.Handler
@@ -125,12 +133,16 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/netmap: alphabet sync handler is not set")
case p.NotaryDepositHandler == nil:
return nil, errors.New("ir/netmap: notary deposit handler is not set")
+ case p.ContainerWrapper == nil:
+ return nil, errors.New("ir/netmap: container contract wrapper is not set")
case p.NodeValidator == nil:
return nil, errors.New("ir/netmap: node validator is not set")
case p.NodeStateSettings == nil:
return nil, errors.New("ir/netmap: node state settings is not set")
}
+ p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))
+
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err)
@@ -149,6 +161,7 @@ func New(p *Params) (*Processor, error) {
epochState: p.EpochState,
alphabetState: p.AlphabetState,
netmapClient: p.NetmapClient,
+ containerWrp: p.ContainerWrapper,
netmapSnapshot: newCleanupTable(p.CleanupEnabled, p.CleanupThreshold),
handleAlphabetSync: p.AlphabetSyncHandler,
@@ -161,16 +174,36 @@ func New(p *Params) (*Processor, error) {
}, nil
}
+// ListenerNotificationParsers for the 'event.Listener' event producer.
+func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
+ parsers := make([]event.NotificationParserInfo, 0, 3)
+
+ var p event.NotificationParserInfo
+
+ p.SetScriptHash(np.netmapClient.ContractAddress())
+
+ // new epoch event
+ p.SetType(newEpochNotification)
+ p.SetParser(netmapEvent.ParseNewEpoch)
+ parsers = append(parsers, p)
+
+ return parsers
+}
+
// ListenerNotificationHandlers for the 'event.Listener' event producer.
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- return []event.NotificationHandlerInfo{
- {
- Contract: np.netmapClient.ContractAddress(),
- Type: newEpochNotification,
- Parser: netmapEvent.ParseNewEpoch,
- Handlers: []event.Handler{np.handleNewEpoch},
- },
- }
+ handlers := make([]event.NotificationHandlerInfo, 0, 3)
+
+ var i event.NotificationHandlerInfo
+
+ i.SetScriptHash(np.netmapClient.ContractAddress())
+
+ // new epoch handler
+ i.SetType(newEpochNotification)
+ i.SetHandler(np.handleNewEpoch)
+ handlers = append(handlers, i)
+
+ return handlers
}
// ListenerNotaryParsers for the 'event.Listener' event producer.
diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go
index 310f12248..255d498d3 100644
--- a/pkg/innerring/processors/netmap/wrappers.go
+++ b/pkg/innerring/processors/netmap/wrappers.go
@@ -1,8 +1,6 @@
package netmap
import (
- "context"
-
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -20,42 +18,40 @@ type netmapClientWrapper struct {
netmapClient *netmapclient.Client
}
-func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error {
- _, err := w.netmapClient.UpdatePeerState(ctx, p)
- return err
+func (w *netmapClientWrapper) UpdatePeerState(p netmapclient.UpdatePeerPrm) error {
+ return w.netmapClient.UpdatePeerState(p)
}
-func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
- _, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...)
- return err
+func (w *netmapClientWrapper) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
+ return w.netmapClient.Morph().NotaryInvoke(contract, fee, nonce, vub, method, args...)
}
func (w *netmapClientWrapper) ContractAddress() util.Uint160 {
return w.netmapClient.ContractAddress()
}
-func (w *netmapClientWrapper) EpochDuration(ctx context.Context) (uint64, error) {
- return w.netmapClient.EpochDuration(ctx)
+func (w *netmapClientWrapper) EpochDuration() (uint64, error) {
+ return w.netmapClient.EpochDuration()
}
func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) {
return w.netmapClient.Morph().TxHeight(h)
}
-func (w *netmapClientWrapper) NetMap(ctx context.Context) (*netmap.NetMap, error) {
- return w.netmapClient.NetMap(ctx)
+func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) {
+ return w.netmapClient.NetMap()
}
-func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error {
- return w.netmapClient.NewEpoch(ctx, epoch)
+func (w *netmapClientWrapper) NewEpoch(epoch uint64, force bool) error {
+ return w.netmapClient.NewEpoch(epoch, force)
}
func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) {
return w.netmapClient.Morph().IsValidScript(script, signers)
}
-func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error {
- return w.netmapClient.AddPeer(ctx, p)
+func (w *netmapClientWrapper) AddPeer(p netmapclient.AddPeerPrm) error {
+ return w.netmapClient.AddPeer(p)
}
func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go
index 0ef771359..c5adb71eb 100644
--- a/pkg/innerring/state.go
+++ b/pkg/innerring/state.go
@@ -1,14 +1,12 @@
package innerring
import (
- "context"
"fmt"
"sort"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/spf13/viper"
@@ -48,21 +46,21 @@ func (s *Server) SetEpochDuration(val uint64) {
}
// IsActive is a getter for a global active flag state.
-func (s *Server) IsActive(ctx context.Context) bool {
- return s.InnerRingIndex(ctx) >= 0
+func (s *Server) IsActive() bool {
+ return s.InnerRingIndex() >= 0
}
// IsAlphabet is a getter for a global alphabet flag state.
-func (s *Server) IsAlphabet(ctx context.Context) bool {
- return s.AlphabetIndex(ctx) >= 0
+func (s *Server) IsAlphabet() bool {
+ return s.AlphabetIndex() >= 0
}
// InnerRingIndex is a getter for a global index of node in inner ring list. Negative
// index means that node is not in the inner ring list.
-func (s *Server) InnerRingIndex(ctx context.Context) int {
- index, err := s.statusIndex.InnerRingIndex(ctx)
+func (s *Server) InnerRingIndex() int {
+ index, err := s.statusIndex.InnerRingIndex()
if err != nil {
- s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
+ s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
return -1
}
@@ -71,10 +69,10 @@ func (s *Server) InnerRingIndex(ctx context.Context) int {
// InnerRingSize is a getter for a global size of inner ring list. This value
// paired with inner ring index.
-func (s *Server) InnerRingSize(ctx context.Context) int {
- size, err := s.statusIndex.InnerRingSize(ctx)
+func (s *Server) InnerRingSize() int {
+ size, err := s.statusIndex.InnerRingSize()
if err != nil {
- s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
+ s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
return 0
}
@@ -83,28 +81,28 @@ func (s *Server) InnerRingSize(ctx context.Context) int {
// AlphabetIndex is a getter for a global index of node in alphabet list.
// Negative index means that node is not in the alphabet list.
-func (s *Server) AlphabetIndex(ctx context.Context) int {
- index, err := s.statusIndex.AlphabetIndex(ctx)
+func (s *Server) AlphabetIndex() int {
+ index, err := s.statusIndex.AlphabetIndex()
if err != nil {
- s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
+ s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
return -1
}
return int(index)
}
-func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
+func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) error {
validators := prm.Validators
- index := s.InnerRingIndex(ctx)
+ index := s.InnerRingIndex()
if s.contracts.alphabet.indexOutOfRange(index) {
- s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
+ s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
return nil
}
if len(validators) == 0 {
- s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
+ s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
return nil
}
@@ -119,7 +117,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
)
if prm.Hash != nil {
- nonce, vub, err = s.morphClient.CalculateNonceAndVUB(prm.Hash)
+ nonce, vub, err = s.morphClient.CalculateNonceAndVUB(*prm.Hash)
if err != nil {
return fmt.Errorf("could not calculate nonce and `validUntilBlock` values: %w", err)
}
@@ -127,12 +125,12 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
}
s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
- _, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
+ err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
if err != nil {
- s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
+ s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
- zap.Error(err))
+ zap.String("error", err.Error()))
}
})
@@ -141,9 +139,9 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
// VoteForSidechainValidator calls vote method on alphabet contracts with
// the provided list of keys.
-func (s *Server) VoteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
+func (s *Server) VoteForSidechainValidator(prm governance.VoteValidatorPrm) error {
sort.Sort(prm.Validators)
- return s.voteForSidechainValidator(ctx, prm)
+ return s.voteForSidechainValidator(prm)
}
// ResetEpochTimer resets the block timer that produces events to update epoch
@@ -154,24 +152,13 @@ func (s *Server) ResetEpochTimer(h uint32) error {
return s.epochTimer.Reset()
}
-func (s *Server) setHealthStatus(ctx context.Context, hs control.HealthStatus) {
+func (s *Server) setHealthStatus(hs control.HealthStatus) {
s.healthStatus.Store(int32(hs))
- s.notifySystemd(ctx, hs)
if s.irMetrics != nil {
s.irMetrics.SetHealth(int32(hs))
}
}
-func (s *Server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
- if swapped = s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
- s.notifySystemd(ctx, newSt)
- if s.irMetrics != nil {
- s.irMetrics.SetHealth(int32(newSt))
- }
- }
- return
-}
-
// HealthStatus returns the current health status of the IR application.
func (s *Server) HealthStatus() control.HealthStatus {
return control.HealthStatus(s.healthStatus.Load())
@@ -186,23 +173,3 @@ func initPersistentStateStorage(cfg *viper.Viper) (*state.PersistentStorage, err
return persistStorage, nil
}
-
-func (s *Server) notifySystemd(ctx context.Context, st control.HealthStatus) {
- if !s.sdNotify {
- return
- }
- var err error
- switch st {
- case control.HealthStatus_READY:
- err = sdnotify.FlagAndStatus(sdnotify.ReadyEnabled)
- case control.HealthStatus_SHUTTING_DOWN:
- err = sdnotify.FlagAndStatus(sdnotify.StoppingEnabled)
- case control.HealthStatus_RECONFIGURING:
- err = sdnotify.FlagAndStatus(sdnotify.ReloadingEnabled)
- default:
- err = sdnotify.Status(fmt.Sprintf("%v", st))
- }
- if err != nil {
- s.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
- }
-}
diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go
index f60ca87c4..fe09f8f2d 100644
--- a/pkg/innerring/state_test.go
+++ b/pkg/innerring/state_test.go
@@ -1,7 +1,6 @@
package innerring
import (
- "context"
"testing"
"time"
@@ -43,12 +42,12 @@ func TestServerState(t *testing.T) {
require.Equal(t, epochDuration, srv.EpochDuration(), "invalid epoch duration")
var healthStatus control.HealthStatus = control.HealthStatus_READY
- srv.setHealthStatus(context.Background(), healthStatus)
+ srv.setHealthStatus(healthStatus)
require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status")
- require.True(t, srv.IsActive(context.Background()), "invalid IsActive result")
- require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result")
- require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index")
- require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR index")
- require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index")
+ require.True(t, srv.IsActive(), "invalid IsActive result")
+ require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result")
+ require.Equal(t, 0, srv.InnerRingIndex(), "invalid IR index")
+ require.Equal(t, 1, srv.InnerRingSize(), "invalid IR index")
+ require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index")
}
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go
index a6c40f9fa..c787f9d5e 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go
@@ -58,7 +58,7 @@ func defaultCfg(c *cfg) {
},
fullSizeLimit: 1 << 30, // 1GB
objSizeLimit: 1 << 20, // 1MB
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
metrics: &NoopMetrics{},
}
}
@@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option {
// WithLogger returns an option to specify Blobovnicza's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "Blobovnicza"))}
}
}
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
index 95fdd844b..8d701ae5c 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
@@ -63,16 +63,16 @@ func TestBlobovnicza(t *testing.T) {
WithPath(p),
WithObjectSizeLimit(objSizeLim),
WithFullSizeLimit(sizeLim),
- WithLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t, true)),
)
defer os.Remove(p)
// open Blobovnicza
- require.NoError(t, blz.Open(context.Background()))
+ require.NoError(t, blz.Open())
// initialize Blobovnicza
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Init())
// try to read non-existent address
testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound)
@@ -102,5 +102,5 @@ func TestBlobovnicza(t *testing.T) {
return err == nil
}, nil)
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
}
diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go
index 4947512cc..ad554a0ad 100644
--- a/pkg/local_object_storage/blobovnicza/control.go
+++ b/pkg/local_object_storage/blobovnicza/control.go
@@ -1,7 +1,6 @@
package blobovnicza
import (
- "context"
"errors"
"fmt"
"path/filepath"
@@ -16,7 +15,7 @@ import (
//
// If the database file does not exist, it will be created automatically.
// If blobovnicza is already open, does nothing.
-func (b *Blobovnicza) Open(ctx context.Context) error {
+func (b *Blobovnicza) Open() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@@ -24,7 +23,7 @@ func (b *Blobovnicza) Open(ctx context.Context) error {
return nil
}
- b.log.Debug(ctx, logs.BlobovniczaCreatingDirectoryForBoltDB,
+ b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB,
zap.String("path", b.path),
zap.Bool("ro", b.boltOptions.ReadOnly),
)
@@ -38,7 +37,7 @@ func (b *Blobovnicza) Open(ctx context.Context) error {
}
}
- b.log.Debug(ctx, logs.BlobovniczaOpeningBoltDB,
+ b.log.Debug(logs.BlobovniczaOpeningBoltDB,
zap.String("path", b.path),
zap.Stringer("permissions", b.perm),
)
@@ -56,7 +55,7 @@ func (b *Blobovnicza) Open(ctx context.Context) error {
//
// If Blobovnicza is already initialized, no action is taken.
// Blobovnicza must be open, otherwise an error will return.
-func (b *Blobovnicza) Init(ctx context.Context) error {
+func (b *Blobovnicza) Init() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@@ -64,7 +63,7 @@ func (b *Blobovnicza) Init(ctx context.Context) error {
return errors.New("blobovnicza is not open")
}
- b.log.Debug(ctx, logs.BlobovniczaInitializing,
+ b.log.Debug(logs.BlobovniczaInitializing,
zap.Uint64("object size limit", b.objSizeLimit),
zap.Uint64("storage size limit", b.fullSizeLimit),
)
@@ -72,7 +71,7 @@ func (b *Blobovnicza) Init(ctx context.Context) error {
size := b.dataSize.Load()
items := b.itemsCount.Load()
if size != 0 || items != 0 {
- b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
+ b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
return nil
}
@@ -82,7 +81,7 @@ func (b *Blobovnicza) Init(ctx context.Context) error {
// create size range bucket
rangeStr := stringifyBounds(lower, upper)
- b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange,
+ b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange,
zap.String("range", rangeStr))
_, err := tx.CreateBucketIfNotExists(key)
@@ -99,52 +98,23 @@ func (b *Blobovnicza) Init(ctx context.Context) error {
}
}
- return b.initializeCounters(ctx)
+ return b.initializeCounters()
}
-func (b *Blobovnicza) ObjectsCount() uint64 {
- return b.itemsCount.Load()
-}
-
-func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
+func (b *Blobovnicza) initializeCounters() error {
var size uint64
var items uint64
- var sizeExists bool
- var itemsCountExists bool
-
err := b.boltDB.View(func(tx *bbolt.Tx) error {
- size, sizeExists = hasDataSize(tx)
- items, itemsCountExists = hasItemsCount(tx)
-
- if sizeExists && itemsCountExists {
- return nil
- }
-
- return b.iterateAllDataBuckets(tx, func(_, _ uint64, b *bbolt.Bucket) (bool, error) {
- return false, b.ForEach(func(k, v []byte) error {
- size += uint64(len(k) + len(v))
- items++
- return nil
- })
+ return b.iterateAllBuckets(tx, func(lower, upper uint64, b *bbolt.Bucket) (bool, error) {
+ keysN := uint64(b.Stats().KeyN)
+ size += keysN * upper
+ items += keysN
+ return false, nil
})
})
if err != nil {
- return fmt.Errorf("determine DB size: %w", err)
+ return fmt.Errorf("can't determine DB size: %w", err)
}
- if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
- b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
- if err := b.boltDB.Update(func(tx *bbolt.Tx) error {
- if err := saveDataSize(tx, size); err != nil {
- return err
- }
- return saveItemsCount(tx, items)
- }); err != nil {
- b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
- return fmt.Errorf("save blobovnicza's size and items count: %w", err)
- }
- b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
- }
-
b.dataSize.Store(size)
b.itemsCount.Store(items)
b.metrics.AddOpenBlobovniczaSize(size)
@@ -155,7 +125,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
// Close releases all internal database resources.
//
// If blobovnicza is already closed, does nothing.
-func (b *Blobovnicza) Close(ctx context.Context) error {
+func (b *Blobovnicza) Close() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@@ -163,7 +133,7 @@ func (b *Blobovnicza) Close(ctx context.Context) error {
return nil
}
- b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB,
+ b.log.Debug(logs.BlobovniczaClosingBoltDB,
zap.String("path", b.path),
)
diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go
index 8f24b5675..4bc13cc95 100644
--- a/pkg/local_object_storage/blobovnicza/delete.go
+++ b/pkg/local_object_storage/blobovnicza/delete.go
@@ -2,10 +2,9 @@ package blobovnicza
import (
"context"
- "errors"
- "syscall"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -21,7 +20,8 @@ type DeletePrm struct {
}
// DeleteRes groups the resulting values of Delete operation.
-type DeleteRes struct{}
+type DeleteRes struct {
+}
// SetAddress sets the address of the requested object.
func (p *DeletePrm) SetAddress(addr oid.Address) {
@@ -50,10 +50,9 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
var sizeUpperBound uint64
var sizeLowerBound uint64
var dataSize uint64
- var recordSize uint64
err := b.boltDB.Update(func(tx *bbolt.Tx) error {
- err := b.iterateAllDataBuckets(tx, func(lower, upper uint64, buck *bbolt.Bucket) (bool, error) {
+ return b.iterateAllBuckets(tx, func(lower, upper uint64, buck *bbolt.Bucket) (bool, error) {
objData := buck.Get(addrKey)
if objData == nil {
// object is not in bucket => continue iterating
@@ -62,27 +61,9 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
dataSize = uint64(len(objData))
sizeLowerBound = lower
sizeUpperBound = upper
- recordSize = dataSize + uint64(len(addrKey))
found = true
return true, buck.Delete(addrKey)
})
- if err != nil {
- return err
- }
- if found {
- return updateMeta(tx, func(count, size uint64) (uint64, uint64) {
- if count > 0 {
- count--
- }
- if size >= recordSize {
- size -= recordSize
- } else {
- size = 0
- }
- return count, size
- })
- }
- return nil
})
if err == nil && !found {
@@ -90,15 +71,12 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
}
if err == nil && found {
- b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
+ b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket,
zap.String("binary size", stringifyByteSize(dataSize)),
zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
- b.itemDeleted(recordSize)
- }
-
- if errors.Is(err, syscall.ENOSPC) {
- err = ErrNoSpace
+ b.itemDeleted(sizeUpperBound)
}
return DeleteRes{}, err
diff --git a/pkg/local_object_storage/blobovnicza/errors.go b/pkg/local_object_storage/blobovnicza/errors.go
deleted file mode 100644
index cff8c1776..000000000
--- a/pkg/local_object_storage/blobovnicza/errors.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package blobovnicza
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
-
-// ErrNoSpace returned if blobovnicza failed to perform an operation because of syscall.ENOSPC.
-var ErrNoSpace = logicerr.New("no space left on device with blobovnicza")
diff --git a/pkg/local_object_storage/blobovnicza/exists.go b/pkg/local_object_storage/blobovnicza/exists.go
index f7bc84d4a..e6d28f938 100644
--- a/pkg/local_object_storage/blobovnicza/exists.go
+++ b/pkg/local_object_storage/blobovnicza/exists.go
@@ -12,7 +12,9 @@ import (
// Exists check if object with the specified address is stored in b.
func (b *Blobovnicza) Exists(ctx context.Context, addr oid.Address) (bool, error) {
- exists := false
+ var (
+ exists = false
+ )
_, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Exists",
trace.WithAttributes(
@@ -24,10 +26,7 @@ func (b *Blobovnicza) Exists(ctx context.Context, addr oid.Address) (bool, error
addrKey := addressKey(addr)
err := b.boltDB.View(func(tx *bbolt.Tx) error {
- return tx.ForEach(func(bucketName []byte, buck *bbolt.Bucket) error {
- if isNonDataBucket(bucketName) {
- return nil
- }
+ return tx.ForEach(func(_ []byte, buck *bbolt.Bucket) error {
exists = buck.Get(addrKey) != nil
if exists {
return errInterruptForEach
diff --git a/pkg/local_object_storage/blobovnicza/get.go b/pkg/local_object_storage/blobovnicza/get.go
index 600323f55..3a613cfeb 100644
--- a/pkg/local_object_storage/blobovnicza/get.go
+++ b/pkg/local_object_storage/blobovnicza/get.go
@@ -1,13 +1,13 @@
package blobovnicza
import (
- "bytes"
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -57,17 +57,13 @@ func (b *Blobovnicza) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
)
if err := b.boltDB.View(func(tx *bbolt.Tx) error {
- return tx.ForEach(func(bucketName []byte, buck *bbolt.Bucket) error {
- if isNonDataBucket(bucketName) {
- return nil
- }
-
+ return tx.ForEach(func(_ []byte, buck *bbolt.Bucket) error {
data = buck.Get(addrKey)
if data == nil {
return nil
}
- data = bytes.Clone(data)
+ data = slice.Copy(data)
return errInterruptForEach
})
diff --git a/pkg/local_object_storage/blobovnicza/get_test.go b/pkg/local_object_storage/blobovnicza/get_test.go
index 5a382c159..40c434eb7 100644
--- a/pkg/local_object_storage/blobovnicza/get_test.go
+++ b/pkg/local_object_storage/blobovnicza/get_test.go
@@ -2,6 +2,7 @@ package blobovnicza
import (
"context"
+ "os"
"path/filepath"
"testing"
@@ -14,11 +15,15 @@ func TestBlobovnicza_Get(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
var blz *Blobovnicza
- defer func() { require.NoError(t, blz.Close(context.Background())) }()
+
+ t.Cleanup(func() {
+ blz.Close()
+ os.RemoveAll(filename)
+ })
fnInit := func(szLimit uint64) {
if blz != nil {
- require.NoError(t, blz.Close(context.Background()))
+ require.NoError(t, blz.Close())
}
blz = New(
@@ -26,8 +31,8 @@ func TestBlobovnicza_Get(t *testing.T) {
WithObjectSizeLimit(szLimit),
)
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
+ require.NoError(t, blz.Open())
+ require.NoError(t, blz.Init())
}
// initial distribution: [0:32K] (32K:64K]
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/id.go b/pkg/local_object_storage/blobovnicza/id.go
similarity index 71%
rename from pkg/local_object_storage/blobstor/blobovniczatree/id.go
rename to pkg/local_object_storage/blobovnicza/id.go
index a080819bc..3d3ccf8b9 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/id.go
+++ b/pkg/local_object_storage/blobovnicza/id.go
@@ -1,4 +1,4 @@
-package blobovniczatree
+package blobovnicza
// ID represents Blobovnicza identifier.
type ID []byte
@@ -8,8 +8,8 @@ func NewIDFromBytes(v []byte) *ID {
return (*ID)(&v)
}
-func (id ID) Path() string {
- return string(id) + dbExtension
+func (id ID) String() string {
+ return string(id)
}
func (id ID) Bytes() []byte {
diff --git a/pkg/local_object_storage/blobovnicza/iterate.go b/pkg/local_object_storage/blobovnicza/iterate.go
index cd33b263c..b29ccb43c 100644
--- a/pkg/local_object_storage/blobovnicza/iterate.go
+++ b/pkg/local_object_storage/blobovnicza/iterate.go
@@ -1,7 +1,6 @@
package blobovnicza
import (
- "bytes"
"context"
"fmt"
"math"
@@ -13,11 +12,11 @@ import (
"go.opentelemetry.io/otel/trace"
)
-// iterateAllDataBuckets iterates all buckets in db
+// iterateAllBuckets iterates all buckets in db
//
// If the maximum size of the object (b.objSizeLimit) has been changed to lower value,
// then there may be more buckets than the current limit of the object size.
-func (b *Blobovnicza) iterateAllDataBuckets(tx *bbolt.Tx, f func(uint64, uint64, *bbolt.Bucket) (bool, error)) error {
+func (b *Blobovnicza) iterateAllBuckets(tx *bbolt.Tx, f func(uint64, uint64, *bbolt.Bucket) (bool, error)) error {
return b.iterateBucketKeys(false, func(lower uint64, upper uint64, key []byte) (bool, error) {
buck := tx.Bucket(key)
if buck == nil {
@@ -57,6 +56,14 @@ func (b *Blobovnicza) iterateBounds(useObjLimitBound bool, f func(uint64, uint64
return nil
}
+func max(a, b uint64) uint64 {
+ if a > b {
+ return a
+ }
+
+ return b
+}
+
// IterationElement represents a unit of elements through which Iterate operation passes.
type IterationElement struct {
addr oid.Address
@@ -109,7 +116,8 @@ func (x *IteratePrm) IgnoreErrors() {
}
// IterateRes groups the resulting values of Iterate operation.
-type IterateRes struct{}
+type IterateRes struct {
+}
// Iterate goes through all stored objects, and passes IterationElement to parameterized handler until error return.
//
@@ -131,10 +139,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes,
var elem IterationElement
if err := b.boltDB.View(func(tx *bbolt.Tx) error {
- return tx.ForEach(func(bucketName []byte, buck *bbolt.Bucket) error {
- if isNonDataBucket(bucketName) {
- return nil
- }
+ return tx.ForEach(func(name []byte, buck *bbolt.Bucket) error {
return buck.ForEach(func(k, v []byte) error {
select {
case <-ctx.Done():
@@ -146,12 +151,12 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes,
if prm.ignoreErrors {
return nil
}
- return fmt.Errorf("decode address key: %w", err)
+ return fmt.Errorf("could not decode address key: %w", err)
}
}
if !prm.withoutData {
- elem.data = bytes.Clone(v)
+ elem.data = v
}
return prm.handler(elem)
diff --git a/pkg/local_object_storage/blobovnicza/iterate_test.go b/pkg/local_object_storage/blobovnicza/iterate_test.go
index 717274781..90308723c 100644
--- a/pkg/local_object_storage/blobovnicza/iterate_test.go
+++ b/pkg/local_object_storage/blobovnicza/iterate_test.go
@@ -1,13 +1,13 @@
package blobovnicza
import (
- "bytes"
"context"
"errors"
"path/filepath"
"testing"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/nspcc-dev/neo-go/pkg/util/slice"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
)
@@ -15,8 +15,8 @@ import (
func TestBlobovniczaIterate(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
b := New(WithPath(filename))
- require.NoError(t, b.Open(context.Background()))
- require.NoError(t, b.Init(context.Background()))
+ require.NoError(t, b.Open())
+ require.NoError(t, b.Init())
data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}}
addr := oidtest.Address()
@@ -30,7 +30,7 @@ func TestBlobovniczaIterate(t *testing.T) {
seen := make([][]byte, 0, 2)
inc := func(e IterationElement) error {
- seen = append(seen, bytes.Clone(e.data))
+ seen = append(seen, slice.Copy(e.data))
return nil
}
diff --git a/pkg/local_object_storage/blobovnicza/meta.go b/pkg/local_object_storage/blobovnicza/meta.go
deleted file mode 100644
index 3316d4666..000000000
--- a/pkg/local_object_storage/blobovnicza/meta.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package blobovnicza
-
-import (
- "bytes"
- "encoding/binary"
-
- "go.etcd.io/bbolt"
-)
-
-const (
- dataSizeAndItemsCountBufLength = 8
-)
-
-var (
- metaBucketName = []byte("META")
- dataSizeKey = []byte("data_size")
- itemsCountKey = []byte("items_count")
-)
-
-func isNonDataBucket(bucketName []byte) bool {
- return bytes.Equal(bucketName, incompletedMoveBucketName) || bytes.Equal(bucketName, metaBucketName)
-}
-
-func hasDataSize(tx *bbolt.Tx) (uint64, bool) {
- b := tx.Bucket(metaBucketName)
- if b == nil {
- return 0, false
- }
- v := b.Get(dataSizeKey)
- if v == nil {
- return 0, false
- }
- if len(v) != dataSizeAndItemsCountBufLength {
- return 0, false
- }
- return binary.LittleEndian.Uint64(v), true
-}
-
-func hasItemsCount(tx *bbolt.Tx) (uint64, bool) {
- b := tx.Bucket(metaBucketName)
- if b == nil {
- return 0, false
- }
- v := b.Get(itemsCountKey)
- if v == nil {
- return 0, false
- }
- if len(v) != dataSizeAndItemsCountBufLength {
- return 0, false
- }
- return binary.LittleEndian.Uint64(v), true
-}
-
-func saveDataSize(tx *bbolt.Tx, size uint64) error {
- b, err := tx.CreateBucketIfNotExists(metaBucketName)
- if err != nil {
- return err
- }
- buf := make([]byte, dataSizeAndItemsCountBufLength)
- binary.LittleEndian.PutUint64(buf, size)
- return b.Put(dataSizeKey, buf)
-}
-
-func saveItemsCount(tx *bbolt.Tx, count uint64) error {
- b, err := tx.CreateBucketIfNotExists(metaBucketName)
- if err != nil {
- return err
- }
- buf := make([]byte, dataSizeAndItemsCountBufLength)
- binary.LittleEndian.PutUint64(buf, count)
- return b.Put(itemsCountKey, buf)
-}
-
-func updateMeta(tx *bbolt.Tx, updateValues func(count, size uint64) (uint64, uint64)) error {
- b, err := tx.CreateBucketIfNotExists(metaBucketName)
- if err != nil {
- return err
- }
-
- var count uint64
- var size uint64
-
- v := b.Get(itemsCountKey)
- if v != nil {
- count = binary.LittleEndian.Uint64(v)
- }
-
- v = b.Get(dataSizeKey)
- if v != nil {
- size = binary.LittleEndian.Uint64(v)
- }
-
- count, size = updateValues(count, size)
-
- sizeBuf := make([]byte, dataSizeAndItemsCountBufLength)
- binary.LittleEndian.PutUint64(sizeBuf, size)
- if err := b.Put(dataSizeKey, sizeBuf); err != nil {
- return err
- }
-
- countBuf := make([]byte, dataSizeAndItemsCountBufLength)
- binary.LittleEndian.PutUint64(countBuf, count)
- return b.Put(itemsCountKey, countBuf)
-}
diff --git a/pkg/local_object_storage/blobovnicza/move.go b/pkg/local_object_storage/blobovnicza/move.go
deleted file mode 100644
index 420e22a48..000000000
--- a/pkg/local_object_storage/blobovnicza/move.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package blobovnicza
-
-import (
- "context"
- "errors"
- "fmt"
- "syscall"
-
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-var incompletedMoveBucketName = []byte("INCOMPLETED_MOVE")
-
-type MoveInfo struct {
- Address oid.Address
- TargetStorageID []byte
-}
-
-func (b *Blobovnicza) PutMoveInfo(ctx context.Context, prm MoveInfo) error {
- _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.PutMoveInfo",
- trace.WithAttributes(
- attribute.String("path", b.path),
- attribute.String("address", prm.Address.EncodeToString()),
- attribute.String("target_storage_id", string(prm.TargetStorageID)),
- ))
- defer span.End()
-
- key := addressKey(prm.Address)
-
- err := b.boltDB.Update(func(tx *bbolt.Tx) error {
- bucket, err := tx.CreateBucketIfNotExists(incompletedMoveBucketName)
- if err != nil {
- return err
- }
-
- if err := bucket.Put(key, prm.TargetStorageID); err != nil {
- return fmt.Errorf("(%T) failed to save move info: %w", b, err)
- }
-
- return nil
- })
-
- if errors.Is(err, syscall.ENOSPC) {
- err = ErrNoSpace
- }
- return err
-}
-
-func (b *Blobovnicza) DropMoveInfo(ctx context.Context, address oid.Address) error {
- _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.DropMoveInfo",
- trace.WithAttributes(
- attribute.String("path", b.path),
- attribute.String("address", address.EncodeToString()),
- ))
- defer span.End()
-
- key := addressKey(address)
-
- err := b.boltDB.Update(func(tx *bbolt.Tx) error {
- bucket := tx.Bucket(incompletedMoveBucketName)
- if bucket == nil {
- return nil
- }
-
- if err := bucket.Delete(key); err != nil {
- return fmt.Errorf("(%T) failed to drop move info: %w", b, err)
- }
-
- c := bucket.Cursor()
- k, v := c.First()
- bucketEmpty := k == nil && v == nil
- if bucketEmpty {
- return tx.DeleteBucket(incompletedMoveBucketName)
- }
-
- return nil
- })
- if errors.Is(err, syscall.ENOSPC) {
- err = ErrNoSpace
- }
- return err
-}
-
-func (b *Blobovnicza) ListMoveInfo(ctx context.Context) ([]MoveInfo, error) {
- _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.ListMoveInfo",
- trace.WithAttributes(
- attribute.String("path", b.path),
- ))
- defer span.End()
-
- var result []MoveInfo
- if err := b.boltDB.View(func(tx *bbolt.Tx) error {
- bucket := tx.Bucket(incompletedMoveBucketName)
- if bucket == nil {
- return nil
- }
- return bucket.ForEach(func(k, v []byte) error {
- var addr oid.Address
- storageID := make([]byte, len(v))
- if err := addressFromKey(&addr, k); err != nil {
- return err
- }
- copy(storageID, v)
- result = append(result, MoveInfo{
- Address: addr,
- TargetStorageID: storageID,
- })
- return nil
- })
- }); err != nil {
- return nil, err
- }
-
- return result, nil
-}
diff --git a/pkg/local_object_storage/blobovnicza/put.go b/pkg/local_object_storage/blobovnicza/put.go
index ff223ba36..787372211 100644
--- a/pkg/local_object_storage/blobovnicza/put.go
+++ b/pkg/local_object_storage/blobovnicza/put.go
@@ -2,9 +2,7 @@ package blobovnicza
import (
"context"
- "errors"
"fmt"
- "syscall"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -19,12 +17,11 @@ type PutPrm struct {
addr oid.Address
objData []byte
-
- force bool
}
// PutRes groups the resulting values of Put operation.
-type PutRes struct{}
+type PutRes struct {
+}
// SetAddress sets the address of the saving object.
func (p *PutPrm) SetAddress(addr oid.Address) {
@@ -36,11 +33,6 @@ func (p *PutPrm) SetMarshaledObject(data []byte) {
p.objData = data
}
-// SetForce sets force option.
-func (p *PutPrm) SetForce(f bool) {
- p.force = f
-}
-
// Put saves an object in Blobovnicza.
//
// If binary representation of the object is not set,
@@ -65,9 +57,8 @@ func (b *Blobovnicza) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
defer span.End()
sz := uint64(len(prm.objData))
- bucketName := bucketForSize(sz)
+ bucketName, upperBound := bucketForSize(sz)
key := addressKey(prm.addr)
- recordSize := sz + uint64(len(key))
err := b.boltDB.Batch(func(tx *bbolt.Tx) error {
buck := tx.Bucket(bucketName)
@@ -75,15 +66,7 @@ func (b *Blobovnicza) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
// expected to happen:
// - before initialization step (incorrect usage by design)
// - if DB is corrupted (in future this case should be handled)
- // - blobovnicza's object size changed before rebuild (handled if prm.force flag specified)
- if !prm.force {
- return logicerr.Wrap(fmt.Errorf("(%T) bucket for size %d not created", b, sz))
- }
- var err error
- buck, err = tx.CreateBucket(bucketName)
- if err != nil {
- return fmt.Errorf("(%T) failed to create bucket for size %d: %w", b, sz, err)
- }
+ return logicerr.Wrap(fmt.Errorf("(%T) bucket for size %d not created", b, sz))
}
// save the object in bucket
@@ -91,14 +74,10 @@ func (b *Blobovnicza) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
return fmt.Errorf("(%T) could not save object in bucket: %w", b, err)
}
- return updateMeta(tx, func(count, size uint64) (uint64, uint64) {
- return count + 1, size + recordSize
- })
+ return nil
})
if err == nil {
- b.itemAdded(recordSize)
- } else if errors.Is(err, syscall.ENOSPC) {
- err = ErrNoSpace
+ b.itemAdded(upperBound)
}
return PutRes{}, err
diff --git a/pkg/local_object_storage/blobovnicza/sizes.go b/pkg/local_object_storage/blobovnicza/sizes.go
index 9bbed0db5..cd1f69725 100644
--- a/pkg/local_object_storage/blobovnicza/sizes.go
+++ b/pkg/local_object_storage/blobovnicza/sizes.go
@@ -29,8 +29,9 @@ func bucketKeyFromBounds(upperBound uint64) []byte {
return buf[:ln]
}
-func bucketForSize(sz uint64) []byte {
- return bucketKeyFromBounds(upperPowerOfTwo(sz))
+func bucketForSize(sz uint64) ([]byte, uint64) {
+ upperBound := upperPowerOfTwo(sz)
+ return bucketKeyFromBounds(upperBound), upperBound
}
func upperPowerOfTwo(v uint64) uint64 {
@@ -57,7 +58,3 @@ func (b *Blobovnicza) itemDeleted(itemSize uint64) {
func (b *Blobovnicza) IsFull() bool {
return b.dataSize.Load() >= b.fullSizeLimit
}
-
-func (b *Blobovnicza) FillPercent() int {
- return int(100.0 * (float64(b.dataSize.Load()) / float64(b.fullSizeLimit)))
-}
diff --git a/pkg/local_object_storage/blobovnicza/sizes_test.go b/pkg/local_object_storage/blobovnicza/sizes_test.go
index d582fc5e4..d2f576fd3 100644
--- a/pkg/local_object_storage/blobovnicza/sizes_test.go
+++ b/pkg/local_object_storage/blobovnicza/sizes_test.go
@@ -34,7 +34,7 @@ func TestSizes(t *testing.T) {
upperBound: 4 * firstBucketBound,
},
} {
- key := bucketForSize(item.sz)
+ key, _ := bucketForSize(item.sz)
require.Equal(t, bucketKeyFromBounds(item.upperBound), key)
}
}
@@ -42,7 +42,7 @@ func TestSizes(t *testing.T) {
func BenchmarkUpperBound(b *testing.B) {
for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} {
b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
- for range b.N {
+ for i := 0; i < b.N; i++ {
_ = upperPowerOfTwo(size)
}
})
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/active.go b/pkg/local_object_storage/blobstor/blobovniczatree/active.go
index dbaa7387a..526699b45 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/active.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/active.go
@@ -1,7 +1,6 @@
package blobovniczatree
import (
- "context"
"path/filepath"
"sync"
@@ -18,12 +17,12 @@ func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza {
return db.blz
}
-func (db *activeDB) Close(ctx context.Context) {
- db.shDB.Close(ctx)
+func (db *activeDB) Close() {
+ db.shDB.Close()
}
-func (db *activeDB) SystemPath() string {
- return db.shDB.SystemPath()
+func (db *activeDB) Path() string {
+ return db.shDB.Path()
}
// activeDBManager manages active blobovnicza instances (that is, those that are being used for Put).
@@ -38,24 +37,24 @@ type activeDBManager struct {
closed bool
dbManager *dbManager
- rootPath string
+ leafWidth uint64
}
-func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager {
+func newActiveDBManager(dbManager *dbManager, leafWidth uint64) *activeDBManager {
return &activeDBManager{
levelToActiveDBGuard: &sync.RWMutex{},
levelToActiveDB: make(map[string]*sharedDB),
levelLock: utilSync.NewKeyLocker[string](),
dbManager: dbManager,
- rootPath: rootPath,
+ leafWidth: leafWidth,
}
}
// GetOpenedActiveDBForLevel returns active DB for level.
// DB must be closed after use.
-func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath string) (*activeDB, error) {
- activeDB, err := m.getCurrentActiveIfOk(ctx, lvlPath)
+func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, error) {
+ activeDB, err := m.getCurrentActiveIfOk(lvlPath)
if err != nil {
return nil, err
}
@@ -63,7 +62,7 @@ func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath
return activeDB, nil
}
- return m.updateAndGetActive(ctx, lvlPath)
+ return m.updateAndGetActive(lvlPath)
}
func (m *activeDBManager) Open() {
@@ -73,18 +72,18 @@ func (m *activeDBManager) Open() {
m.closed = false
}
-func (m *activeDBManager) Close(ctx context.Context) {
+func (m *activeDBManager) Close() {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
for _, db := range m.levelToActiveDB {
- db.Close(ctx)
+ db.Close()
}
m.levelToActiveDB = make(map[string]*sharedDB)
m.closed = true
}
-func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath string) (*activeDB, error) {
+func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error) {
m.levelToActiveDBGuard.RLock()
defer m.levelToActiveDBGuard.RUnlock()
@@ -97,13 +96,13 @@ func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath stri
return nil, nil
}
- blz, err := db.Open(ctx) // open db for usage, will be closed on activeDB.Close()
+ blz, err := db.Open() //open db for usage, will be closed on activeDB.Close()
if err != nil {
return nil, err
}
if blz.IsFull() {
- db.Close(ctx)
+ db.Close()
return nil, nil
}
@@ -113,11 +112,11 @@ func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath stri
}, nil
}
-func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string) (*activeDB, error) {
+func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) {
m.levelLock.Lock(lvlPath)
defer m.levelLock.Unlock(lvlPath)
- current, err := m.getCurrentActiveIfOk(ctx, lvlPath)
+ current, err := m.getCurrentActiveIfOk(lvlPath)
if err != nil {
return nil, err
}
@@ -125,7 +124,7 @@ func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string
return current, nil
}
- nextShDB, err := m.getNextSharedDB(ctx, lvlPath)
+ nextShDB, err := m.getNextSharedDB(lvlPath)
if err != nil {
return nil, err
}
@@ -134,7 +133,7 @@ func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string
return nil, nil
}
- blz, err := nextShDB.Open(ctx) // open db for client, client must call Close() after usage
+ blz, err := nextShDB.Open() // open db for client, client must call Close() after usage
if err != nil {
return nil, err
}
@@ -144,34 +143,39 @@ func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string
}, nil
}
-func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (*sharedDB, error) {
- var nextActiveDBIdx uint64
+func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
+ var idx uint64
+ var iterCount uint64
hasActive, currentIdx := m.hasActiveDB(lvlPath)
if hasActive {
- nextActiveDBIdx = currentIdx + 1
- } else {
- hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(m.rootPath, lvlPath))
+ idx = (currentIdx + 1) % m.leafWidth
+ }
+
+ var next *sharedDB
+
+ for iterCount < m.leafWidth {
+ path := filepath.Join(lvlPath, u64ToHexString(idx))
+ shDB := m.dbManager.GetByPath(path)
+ db, err := shDB.Open() //open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
if err != nil {
return nil, err
}
- if hasDBs {
- nextActiveDBIdx = maxIdx
+ if db.IsFull() {
+ shDB.Close()
+ } else {
+ next = shDB
+ break
}
- }
-
- path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx))
- next := m.dbManager.GetByPath(path)
- _, err := next.Open(ctx) // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
- if err != nil {
- return nil, err
+ idx = (idx + 1) % m.leafWidth
+ iterCount++
}
previous, updated := m.replace(lvlPath, next)
if !updated && next != nil {
- next.Close(ctx) // manager is closed, so don't hold active DB open
+ next.Close() // manager is closed, so don't hold active DB open
}
if updated && previous != nil {
- previous.Close(ctx)
+ previous.Close()
}
return next, nil
}
@@ -188,7 +192,7 @@ func (m *activeDBManager) hasActiveDB(lvlPath string) (bool, uint64) {
if !ok {
return false, 0
}
- return true, u64FromHexString(filepath.Base(db.SystemPath()))
+ return true, u64FromHexString(filepath.Base(db.Path()))
}
func (m *activeDBManager) replace(lvlPath string, shDB *sharedDB) (*sharedDB, bool) {
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
index 3e8b9f07b..fd5155ee3 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
@@ -1,12 +1,9 @@
package blobovniczatree
import (
- "context"
"errors"
- "os"
+ "fmt"
"strconv"
- "strings"
- "sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
@@ -57,24 +54,17 @@ import (
type Blobovniczas struct {
cfg
- commondbManager *dbManager
- activeDBManager *activeDBManager
- dbCache *dbCache
- deleteProtectedObjects *addressMap
- dbFilesGuard *sync.RWMutex
- rebuildGuard *sync.RWMutex
+ commondbManager *dbManager
+ activeDBManager *activeDBManager
+ dbCache *dbCache
}
var _ common.Storage = (*Blobovniczas)(nil)
var errPutFailed = errors.New("could not save the object in any blobovnicza")
-const (
- dbExtension = ".db"
-)
-
// NewBlobovniczaTree returns new instance of blobovniczas tree.
-func NewBlobovniczaTree(ctx context.Context, opts ...Option) (blz *Blobovniczas) {
+func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
blz = new(Blobovniczas)
initConfig(&blz.cfg)
@@ -82,13 +72,13 @@ func NewBlobovniczaTree(ctx context.Context, opts ...Option) (blz *Blobovniczas)
opts[i](&blz.cfg)
}
- blz.commondbManager = newDBManager(blz.rootPath, blz.blzOpts, blz.readOnly, blz.metrics.Blobovnicza(), blz.log)
- blz.activeDBManager = newActiveDBManager(blz.commondbManager, blz.rootPath)
- blz.dbCache = newDBCache(ctx, blz.openedCacheSize,
- blz.openedCacheTTL, blz.openedCacheExpInterval, blz.commondbManager)
- blz.deleteProtectedObjects = newAddressMap()
- blz.dbFilesGuard = &sync.RWMutex{}
- blz.rebuildGuard = &sync.RWMutex{}
+ if blz.blzLeafWidth == 0 {
+ blz.blzLeafWidth = blz.blzShallowWidth
+ }
+
+ blz.commondbManager = newDBManager(blz.rootPath, blz.blzOpts, blz.blzLeafWidth, blz.readOnly, blz.metrics.Blobovnicza(), blz.log)
+ blz.activeDBManager = newActiveDBManager(blz.commondbManager, blz.blzLeafWidth)
+ blz.dbCache = newDBCache(blz.openedCacheSize, blz.commondbManager)
return blz
}
@@ -104,46 +94,21 @@ func addressHash(addr *oid.Address, path string) uint64 {
return hrw.StringHash(a + path)
}
+// converts uint64 to hex string.
func u64ToHexString(ind uint64) string {
return strconv.FormatUint(ind, 16)
}
-func u64ToHexStringExt(ind uint64) string {
- return strconv.FormatUint(ind, 16) + dbExtension
-}
-
+// converts uint64 hex string to uint64.
func u64FromHexString(str string) uint64 {
- v, err := strconv.ParseUint(strings.TrimSuffix(str, dbExtension), 16, 64)
+ v, err := strconv.ParseUint(str, 16, 64)
if err != nil {
- panic("blobovnicza name is not an index " + str)
+ panic(fmt.Sprintf("blobovnicza name is not an index %s", str))
}
return v
}
-func getBlobovniczaMaxIndex(directory string) (bool, uint64, error) {
- entries, err := os.ReadDir(directory)
- if os.IsNotExist(err) { // non initialized tree
- return false, 0, nil
- }
- if err != nil {
- return false, 0, err
- }
- if len(entries) == 0 {
- return false, 0, nil
- }
- var hasDBs bool
- var maxIdx uint64
- for _, e := range entries {
- if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
- continue
- }
- hasDBs = true
- maxIdx = max(u64FromHexString(e.Name()), maxIdx)
- }
- return hasDBs, maxIdx, nil
-}
-
// Type is blobovniczatree storage type used in logs and configuration.
const Type = "blobovnicza"
@@ -158,16 +123,16 @@ func (b *Blobovniczas) Path() string {
}
// SetCompressor implements common.Storage.
-func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) {
+func (b *Blobovniczas) SetCompressor(cc *compression.Config) {
b.compression = cc
}
-func (b *Blobovniczas) Compressor() *compression.Compressor {
+func (b *Blobovniczas) Compressor() *compression.Config {
return b.compression
}
// SetReportErrorFunc implements common.Storage.
-func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) {
+func (b *Blobovniczas) SetReportErrorFunc(f func(string, error)) {
b.reportError = f
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
index 04ff5120c..9bdee6df3 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
@@ -1,68 +1,40 @@
package blobovniczatree
import (
- "context"
+ "fmt"
"sync"
- "time"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
- cache "github.com/go-pkgz/expirable-cache/v3"
+ "github.com/hashicorp/golang-lru/v2/simplelru"
)
// dbCache caches sharedDB instances that are NOT open for Put.
//
// Uses dbManager for opening/closing sharedDB instances.
-// Stores a reference to a cached sharedDB, so dbManager does not close it.
+// Stores a reference to an cached sharedDB, so dbManager does not close it.
type dbCache struct {
- cacheGuard *sync.Mutex
- cache cache.Cache[string, *sharedDB]
- pathLock *utilSync.KeyLocker[string] // the order of locks is important: pathLock first, cacheGuard second
+ cacheGuard *sync.RWMutex
+ cache simplelru.LRUCache[string, *sharedDB]
+ pathLock *utilSync.KeyLocker[string]
closed bool
- nonCached map[string]struct{}
- wg sync.WaitGroup
- cancel context.CancelFunc
dbManager *dbManager
}
-func newDBCache(parentCtx context.Context, size int,
- ttl time.Duration, expInterval time.Duration,
- dbManager *dbManager,
-) *dbCache {
- ch := cache.NewCache[string, *sharedDB]().
- WithTTL(ttl).WithLRU().WithMaxKeys(size).
- WithOnEvicted(func(_ string, db *sharedDB) {
- db.Close(parentCtx)
- })
- ctx, cancel := context.WithCancel(parentCtx)
- res := &dbCache{
- cacheGuard: &sync.Mutex{},
- wg: sync.WaitGroup{},
- cancel: cancel,
- cache: ch,
+func newDBCache(size int, dbManager *dbManager) *dbCache {
+ cache, err := simplelru.NewLRU[string, *sharedDB](size, func(_ string, evictedDB *sharedDB) {
+ evictedDB.Close()
+ })
+ if err != nil {
+ // occurs only if the size is not positive
+ panic(fmt.Errorf("could not create LRU cache of size %d: %w", size, err))
+ }
+ return &dbCache{
+ cacheGuard: &sync.RWMutex{},
+ cache: cache,
dbManager: dbManager,
pathLock: utilSync.NewKeyLocker[string](),
- nonCached: make(map[string]struct{}),
}
- if ttl > 0 {
- res.wg.Add(1)
- go func() {
- ticker := time.NewTicker(expInterval)
- defer ticker.Stop()
- for {
- select {
- case <-ctx.Done():
- res.wg.Done()
- return
- case <-ticker.C:
- res.cacheGuard.Lock()
- res.cache.DeleteExpired()
- res.cacheGuard.Unlock()
- }
- }
- }()
- }
- return res
}
func (c *dbCache) Open() {
@@ -75,39 +47,16 @@ func (c *dbCache) Open() {
func (c *dbCache) Close() {
c.cacheGuard.Lock()
defer c.cacheGuard.Unlock()
- c.cancel()
- c.wg.Wait()
c.cache.Purge()
c.closed = true
}
-func (c *dbCache) GetOrCreate(ctx context.Context, path string) *sharedDB {
+func (c *dbCache) GetOrCreate(path string) *sharedDB {
value := c.getExisted(path)
if value != nil {
return value
}
- return c.create(ctx, path)
-}
-
-func (c *dbCache) EvictAndMarkNonCached(path string) {
- c.pathLock.Lock(path)
- defer c.pathLock.Unlock(path)
-
- c.cacheGuard.Lock()
- defer c.cacheGuard.Unlock()
-
- c.cache.Remove(path)
- c.nonCached[path] = struct{}{}
-}
-
-func (c *dbCache) RemoveFromNonCached(path string) {
- c.pathLock.Lock(path)
- defer c.pathLock.Unlock(path)
-
- c.cacheGuard.Lock()
- defer c.cacheGuard.Unlock()
-
- delete(c.nonCached, path)
+ return c.create(path)
}
func (c *dbCache) getExisted(path string) *sharedDB {
@@ -116,13 +65,11 @@ func (c *dbCache) getExisted(path string) *sharedDB {
if value, ok := c.cache.Get(path); ok {
return value
- } else if value != nil {
- c.cache.Invalidate(path)
}
return nil
}
-func (c *dbCache) create(ctx context.Context, path string) *sharedDB {
+func (c *dbCache) create(path string) *sharedDB {
c.pathLock.Lock(path)
defer c.pathLock.Unlock(path)
@@ -133,12 +80,12 @@ func (c *dbCache) create(ctx context.Context, path string) *sharedDB {
value = c.dbManager.GetByPath(path)
- _, err := value.Open(ctx) // open db to hold reference, closed by evictedDB.Close() or if cache closed
+ _, err := value.Open() //open db to hold reference, closed by evictedDB.Close() or if cache closed
if err != nil {
return value
}
if added := c.put(path, value); !added {
- value.Close(ctx)
+ value.Close()
}
return value
}
@@ -147,11 +94,10 @@ func (c *dbCache) put(path string, db *sharedDB) bool {
c.cacheGuard.Lock()
defer c.cacheGuard.Unlock()
- _, isNonCached := c.nonCached[path]
-
- if isNonCached || c.closed {
- return false
+ if !c.closed {
+ c.cache.Add(path, db)
+ return true
}
- c.cache.Add(path, db)
- return true
+
+ return false
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
index f87f4a144..24cb93865 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/stretchr/testify/require"
)
@@ -18,24 +17,22 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
const n = 1000
st := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t, true)),
WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(10),
WithBlobovniczaShallowDepth(1),
WithRootPath(t.TempDir()))
- require.NoError(t, st.Open(mode.ComponentReadWrite))
+ require.NoError(t, st.Open(false))
require.NoError(t, st.Init())
- defer func() {
- require.NoError(t, st.Close(context.Background()))
- }()
+ t.Cleanup(func() {
+ require.NoError(t, st.Close())
+ })
objGen := &testutil.SeqObjGenerator{ObjSize: 1}
var cnt atomic.Int64
var wg sync.WaitGroup
- for range 1000 {
+ for i := 0; i < 1000; i++ {
wg.Add(1)
go func() {
defer wg.Done()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
index a6c1ce368..5ea426e1c 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
@@ -2,20 +2,15 @@ package blobovniczatree
import (
"context"
- "strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"go.uber.org/zap"
- "golang.org/x/sync/errgroup"
)
// Open opens blobovnicza tree.
-func (b *Blobovniczas) Open(mode mode.ComponentMode) error {
- b.readOnly = mode.ReadOnly()
- b.metrics.SetMode(mode)
- b.metrics.SetRebuildStatus(rebuildStatusNotStarted)
+func (b *Blobovniczas) Open(readOnly bool) error {
+ b.readOnly = readOnly
+ b.metrics.SetMode(readOnly)
b.openManagers()
return nil
}
@@ -24,64 +19,36 @@ func (b *Blobovniczas) Open(mode mode.ComponentMode) error {
//
// Should be called exactly once.
func (b *Blobovniczas) Init() error {
- b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas)
+ b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas)
if b.readOnly {
- b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
+ b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
return nil
}
- return b.initializeDBs(context.TODO())
-}
+ return b.iterateLeaves(context.TODO(), func(p string) (bool, error) {
+ shBlz := b.getBlobovniczaWithoutCaching(p)
+ _, err := shBlz.Open()
+ if err != nil {
+ return true, err
+ }
+ defer shBlz.Close()
-func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
- err := util.MkdirAllX(b.rootPath, b.perm)
- if err != nil {
- return err
- }
-
- eg, egCtx := errgroup.WithContext(ctx)
- if b.blzInitWorkerCount > 0 {
- eg.SetLimit(b.blzInitWorkerCount + 1)
- }
- eg.Go(func() error {
- return b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) {
- eg.Go(func() error {
- p = strings.TrimSuffix(p, rebuildSuffix)
- shBlz := b.getBlobovniczaWithoutCaching(p)
- blz, err := shBlz.Open(egCtx)
- if err != nil {
- return err
- }
- defer shBlz.Close(egCtx)
-
- moveInfo, err := blz.ListMoveInfo(egCtx)
- if err != nil {
- return err
- }
- for _, move := range moveInfo {
- b.deleteProtectedObjects.Add(move.Address)
- }
-
- b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
- return nil
- })
- return false, nil
- })
+ b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
+ return false, nil
})
- return eg.Wait()
}
func (b *Blobovniczas) openManagers() {
- b.commondbManager.Open() // order important
+ b.commondbManager.Open() //order important
b.activeDBManager.Open()
b.dbCache.Open()
}
// Close implements common.Storage.
-func (b *Blobovniczas) Close(ctx context.Context) error {
- b.dbCache.Close() // order important
- b.activeDBManager.Close(ctx)
+func (b *Blobovniczas) Close() error {
+ b.dbCache.Close() //order important
+ b.activeDBManager.Close()
b.commondbManager.Close()
return nil
@@ -90,8 +57,8 @@ func (b *Blobovniczas) Close(ctx context.Context) error {
// returns blobovnicza with path p
//
// If blobovnicza is already cached, instance from cache is returned w/o changes.
-func (b *Blobovniczas) getBlobovnicza(ctx context.Context, p string) *sharedDB {
- return b.dbCache.GetOrCreate(ctx, p)
+func (b *Blobovniczas) getBlobovnicza(p string) *sharedDB {
+ return b.dbCache.GetOrCreate(p)
}
func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB {
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go
deleted file mode 100644
index 7db1891f9..000000000
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package blobovniczatree
-
-import (
- "context"
- "os"
- "path"
- "strconv"
- "testing"
-
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "github.com/stretchr/testify/require"
-)
-
-func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
- t.Parallel()
-
- rootDir := t.TempDir()
-
- blz := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaShallowDepth(3),
- WithBlobovniczaShallowWidth(5),
- WithRootPath(rootDir),
- )
-
- require.NoError(t, blz.Open(mode.ComponentReadWrite))
- require.NoError(t, blz.Init())
-
- obj35 := blobstortest.NewObject(10 * 1024)
- addr35 := objectCore.AddressOf(obj35)
- raw, err := obj35.Marshal()
- require.NoError(t, err)
-
- pRes35, err := blz.Put(context.Background(), common.PutPrm{
- Address: addr35,
- Object: obj35,
- RawData: raw,
- })
- require.NoError(t, err)
-
- gRes, err := blz.Get(context.Background(), common.GetPrm{
- Address: addr35,
- StorageID: pRes35.StorageID,
- })
- require.NoError(t, err)
- require.EqualValues(t, obj35, gRes.Object)
-
- gRes, err = blz.Get(context.Background(), common.GetPrm{
- Address: addr35,
- })
- require.NoError(t, err)
- require.EqualValues(t, obj35, gRes.Object)
-
- require.NoError(t, blz.Close(context.Background()))
-
- // change depth and width
- blz = NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaShallowDepth(5),
- WithBlobovniczaShallowWidth(2),
- WithRootPath(rootDir),
- )
-
- require.NoError(t, blz.Open(mode.ComponentReadWrite))
- require.NoError(t, blz.Init())
-
- gRes, err = blz.Get(context.Background(), common.GetPrm{
- Address: addr35,
- StorageID: pRes35.StorageID,
- })
- require.NoError(t, err)
- require.EqualValues(t, obj35, gRes.Object)
-
- gRes, err = blz.Get(context.Background(), common.GetPrm{
- Address: addr35,
- })
- require.NoError(t, err)
- require.EqualValues(t, obj35, gRes.Object)
-
- obj52 := blobstortest.NewObject(10 * 1024)
- addr52 := objectCore.AddressOf(obj52)
- raw, err = obj52.Marshal()
- require.NoError(t, err)
-
- pRes52, err := blz.Put(context.Background(), common.PutPrm{
- Address: addr52,
- Object: obj52,
- RawData: raw,
- })
- require.NoError(t, err)
-
- require.NoError(t, blz.Close(context.Background()))
-
- // change depth and width back
- blz = NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaShallowDepth(3),
- WithBlobovniczaShallowWidth(5),
- WithRootPath(rootDir),
- )
- require.NoError(t, blz.Open(mode.ComponentReadWrite))
- require.NoError(t, blz.Init())
-
- gRes, err = blz.Get(context.Background(), common.GetPrm{
- Address: addr35,
- StorageID: pRes35.StorageID,
- })
- require.NoError(t, err)
- require.EqualValues(t, obj35, gRes.Object)
-
- gRes, err = blz.Get(context.Background(), common.GetPrm{
- Address: addr35,
- })
- require.NoError(t, err)
- require.EqualValues(t, obj35, gRes.Object)
-
- gRes, err = blz.Get(context.Background(), common.GetPrm{
- Address: addr52,
- StorageID: pRes52.StorageID,
- })
- require.NoError(t, err)
- require.EqualValues(t, obj52, gRes.Object)
-
- gRes, err = blz.Get(context.Background(), common.GetPrm{
- Address: addr52,
- })
- require.NoError(t, err)
- require.EqualValues(t, obj52, gRes.Object)
-
- require.NoError(t, blz.Close(context.Background()))
-}
-
-func TestInitBlobovniczasInitErrorType(t *testing.T) {
- t.Parallel()
-
- rootDir := t.TempDir()
-
- for idx := 0; idx < 10; idx++ {
- f, err := os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"))
- require.NoError(t, err)
- _, err = f.Write([]byte("invalid db"))
- require.NoError(t, err)
- require.NoError(t, f.Close())
-
- f, err = os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"+rebuildSuffix))
- require.NoError(t, err)
- require.NoError(t, f.Close())
- }
-
- blz := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaShallowDepth(1),
- WithBlobovniczaShallowWidth(1),
- WithRootPath(rootDir),
- )
-
- require.NoError(t, blz.Open(mode.ComponentReadWrite))
- err := blz.Init()
- require.Contains(t, err.Error(), "open blobovnicza")
- require.Contains(t, err.Error(), "invalid database")
- require.NoError(t, blz.Close(context.Background()))
-}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/count.go b/pkg/local_object_storage/blobstor/blobovniczatree/count.go
deleted file mode 100644
index b83849c77..000000000
--- a/pkg/local_object_storage/blobstor/blobovniczatree/count.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package blobovniczatree
-
-import (
- "context"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-)
-
-func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) {
- var (
- success bool
- startedAt = time.Now()
- )
- defer func() {
- b.metrics.ObjectsCount(time.Since(startedAt), success)
- }()
-
- ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount")
- defer span.End()
-
- var result uint64
- err := b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
- shDB := b.getBlobovniczaWithoutCaching(p)
- blz, err := shDB.Open(ctx)
- if err != nil {
- return true, err
- }
- defer shDB.Close(ctx)
-
- result += blz.ObjectsCount()
- return false, nil
- })
- if err != nil {
- return 0, err
- }
- return result, nil
-}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
index d096791c3..28e3a8f36 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
@@ -3,13 +3,13 @@ package blobovniczatree
import (
"context"
"encoding/hex"
- "errors"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -18,11 +18,6 @@ import (
"go.uber.org/zap"
)
-var (
- errObjectIsDeleteProtected = errors.New("object is delete protected")
- deleteRes = common.DeleteRes{}
-)
-
// Delete deletes object from blobovnicza tree.
//
// If blobocvnicza ID is specified, only this blobovnicza is processed.
@@ -45,30 +40,20 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
defer span.End()
if b.readOnly {
- return deleteRes, common.ErrReadOnly
- }
-
- if b.rebuildGuard.TryRLock() {
- defer b.rebuildGuard.RUnlock()
- } else {
- return deleteRes, errRebuildInProgress
- }
-
- if b.deleteProtectedObjects.Contains(prm.Address) {
- return deleteRes, errObjectIsDeleteProtected
+ return common.DeleteRes{}, common.ErrReadOnly
}
var bPrm blobovnicza.DeletePrm
bPrm.SetAddress(prm.Address)
if prm.StorageID != nil {
- id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(ctx, id.Path())
- blz, err := shBlz.Open(ctx)
+ id := blobovnicza.NewIDFromBytes(prm.StorageID)
+ shBlz := b.getBlobovnicza(id.String())
+ blz, err := shBlz.Open()
if err != nil {
return res, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
if res, err = b.deleteObject(ctx, blz, bPrm); err == nil {
success = true
@@ -78,13 +63,14 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
objectFound := false
- err = b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) {
+ err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
res, err = b.deleteObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
- b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
+ b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
zap.String("level", p),
- zap.Error(err),
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
}
@@ -99,7 +85,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if err == nil && !objectFound {
// not found in any blobovnicza
- return deleteRes, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
success = err == nil
@@ -110,12 +96,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
//
// returns no error if object was removed from some blobovnicza of the same level.
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) {
- shBlz := b.getBlobovnicza(ctx, blzPath)
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(blzPath)
+ blz, err := shBlz.Open()
if err != nil {
- return deleteRes, err
+ return common.DeleteRes{}, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
return b.deleteObject(ctx, blz, prm)
}
@@ -123,5 +109,5 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz
// removes object from blobovnicza and returns common.DeleteRes.
func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) {
_, err := blz.Delete(ctx, prm)
- return deleteRes, err
+ return common.DeleteRes{}, err
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
index 0c5e48821..e1a6f5ed5 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"go.opentelemetry.io/otel/attribute"
@@ -35,13 +36,13 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
defer span.End()
if prm.StorageID != nil {
- id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(ctx, id.Path())
- blz, err := shBlz.Open(ctx)
+ id := blobovnicza.NewIDFromBytes(prm.StorageID)
+ shBlz := b.getBlobovnicza(id.String())
+ blz, err := shBlz.Open()
if err != nil {
return common.ExistsRes{}, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
exists, err := blz.Exists(ctx, prm.Address)
return common.ExistsRes{Exists: exists}, err
@@ -50,13 +51,14 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
var gPrm blobovnicza.GetPrm
gPrm.SetAddress(prm.Address)
- err := b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) {
+ err := b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
_, err := b.getObjectFromLevel(ctx, gPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
- b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+ b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
index df2b4ffe5..8cbdf613c 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
@@ -1,7 +1,6 @@
package blobovniczatree
import (
- "bytes"
"context"
"os"
"path/filepath"
@@ -10,25 +9,23 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "github.com/nspcc-dev/neo-go/pkg/util/slice"
"github.com/stretchr/testify/require"
)
func TestExistsInvalidStorageID(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t, true)),
WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
WithRootPath(dir),
WithBlobovniczaSize(1<<20))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Open(false))
require.NoError(t, b.Init())
- defer func() { require.NoError(t, b.Close(context.Background())) }()
+ t.Cleanup(func() { _ = b.Close() })
obj := blobstortest.NewObject(1024)
addr := object.AddressOf(obj)
@@ -40,7 +37,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
t.Run("valid but wrong storage id", func(t *testing.T) {
// "0/X/Y" <-> "1/X/Y"
- storageID := bytes.Clone(putRes.StorageID)
+ storageID := slice.Copy(putRes.StorageID)
if storageID[0] == '0' {
storageID[0]++
} else {
@@ -58,7 +55,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
// An invalid boltdb file is created so that it returns an error when opened
require.NoError(t, os.MkdirAll(filepath.Join(dir, relBadFileDir), os.ModePerm))
- require.NoError(t, os.WriteFile(filepath.Join(dir, relBadFileDir, badFileName+".db"), []byte("not a boltdb file content"), 0o777))
+ require.NoError(t, os.WriteFile(filepath.Join(dir, relBadFileDir, badFileName), []byte("not a boltdb file content"), 0777))
res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: []byte(filepath.Join(relBadFileDir, badFileName))})
require.Error(t, err)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
index 9244d765c..6eb8b81ae 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
@@ -1,7 +1,6 @@
package blobovniczatree
import (
- "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -14,9 +13,7 @@ func TestGeneric(t *testing.T) {
helper := func(t *testing.T, dir string) common.Storage {
return NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t, true)),
WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
@@ -43,9 +40,7 @@ func TestControl(t *testing.T) {
newTree := func(t *testing.T) common.Storage {
return NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t, true)),
WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
index e5c83e5f2..49849e759 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -46,13 +47,13 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
bPrm.SetAddress(prm.Address)
if prm.StorageID != nil {
- id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(ctx, id.Path())
- blz, err := shBlz.Open(ctx)
+ id := blobovnicza.NewIDFromBytes(prm.StorageID)
+ shBlz := b.getBlobovnicza(id.String())
+ blz, err := shBlz.Open()
if err != nil {
return res, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
res, err = b.getObject(ctx, blz, bPrm)
if err == nil {
@@ -62,13 +63,14 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
return res, err
}
- err = b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) {
+ err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
res, err = b.getObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
- b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+ b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.Error(err),
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
}
@@ -93,12 +95,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) {
// open blobovnicza (cached inside)
- shBlz := b.getBlobovnicza(ctx, blzPath)
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(blzPath)
+ blz, err := shBlz.Open()
if err != nil {
return common.GetRes{}, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
return b.getObject(ctx, blz, prm)
}
@@ -113,13 +115,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
- return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
+ return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
+ return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
index 27d13f4f3..91d1e3ce0 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
@@ -11,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -45,13 +46,13 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
defer span.End()
if prm.StorageID != nil {
- id := NewIDFromBytes(prm.StorageID)
- shBlz := b.getBlobovnicza(ctx, id.Path())
- blz, err := shBlz.Open(ctx)
+ id := blobovnicza.NewIDFromBytes(prm.StorageID)
+ shBlz := b.getBlobovnicza(id.String())
+ blz, err := shBlz.Open()
if err != nil {
return common.GetRangeRes{}, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
res, err := b.getObjectRange(ctx, blz, prm)
if err == nil {
@@ -63,14 +64,15 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
objectFound := false
- err = b.iterateSortedDBPaths(ctx, prm.Address, func(p string) (bool, error) {
+ err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
res, err = b.getRangeFromLevel(ctx, prm, p)
if err != nil {
outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !client.IsErrObjectNotFound(err) {
- b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+ b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
if outOfBounds {
return true, err
@@ -101,12 +103,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) {
// open blobovnicza (cached inside)
- shBlz := b.getBlobovnicza(ctx, blzPath)
- blz, err := shBlz.Open(ctx)
+ shBlz := b.getBlobovnicza(blzPath)
+ blz, err := shBlz.Open()
if err != nil {
return common.GetRangeRes{}, err
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
return b.getObjectRange(ctx, blz, prm)
}
@@ -128,13 +130,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
- return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err)
+ return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err)
+ return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
}
from := prm.Range.GetOffset()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
index ceb8fb7e3..a2afbb8aa 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
@@ -3,12 +3,9 @@ package blobovniczatree
import (
"context"
"fmt"
- "os"
"path/filepath"
- "strings"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -16,7 +13,6 @@ import (
"git.frostfs.info/TrueCloudLab/hrw"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
)
// Iterate iterates over all objects in b.
@@ -42,24 +38,24 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
data, err := b.compression.Decompress(elem.ObjectData())
if err != nil {
if prm.IgnoreErrors {
- b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
- zap.Stringer("address", elem.Address()),
- zap.Error(err),
- zap.String("storage_id", p),
- zap.String("root_path", b.rootPath))
+ if prm.ErrorHandler != nil {
+ return prm.ErrorHandler(elem.Address(), err)
+ }
return nil
}
- return fmt.Errorf("decompress object data: %w", err)
+ return fmt.Errorf("could not decompress object data: %w", err)
}
if prm.Handler != nil {
return prm.Handler(common.IterationElement{
Address: elem.Address(),
ObjectData: data,
- StorageID: []byte(strings.TrimSuffix(p, dbExtension)),
+ StorageID: []byte(p),
})
}
- return nil
+ return prm.LazyHandler(elem.Address(), func() ([]byte, error) {
+ return data, err
+ })
})
subPrm.DecodeAddresses()
@@ -71,20 +67,16 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
// iterator over all Blobovniczas in unsorted order. Break on f's error return.
func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error {
- return b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
- shBlz := b.getBlobovnicza(ctx, p)
- blz, err := shBlz.Open(ctx)
+ return b.iterateLeaves(ctx, func(p string) (bool, error) {
+ shBlz := b.getBlobovnicza(p)
+ blz, err := shBlz.Open()
if err != nil {
if ignoreErrors {
- b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
- zap.Error(err),
- zap.String("storage_id", p),
- zap.String("root_path", b.rootPath))
return false, nil
}
- return false, fmt.Errorf("open blobovnicza %s: %w", p, err)
+ return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
}
- defer shBlz.Close(ctx)
+ defer shBlz.Close()
err = f(p, blz)
@@ -92,9 +84,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
})
}
-// iterateSortedLeaves iterates over the paths of Blobovniczas sorted by weight.
-//
-// Uses depth, width and leaf width for iteration.
+// iterator over the paths of Blobovniczas sorted by weight.
func (b *Blobovniczas) iterateSortedLeaves(ctx context.Context, addr *oid.Address, f func(string) (bool, error)) error {
_, err := b.iterateSorted(
ctx,
@@ -130,20 +120,11 @@ func (b *Blobovniczas) iterateSorted(ctx context.Context, addr *oid.Address, cur
isLeafLevel := uint64(len(curPath)) == b.blzShallowDepth
levelWidth := b.blzShallowWidth
if isLeafLevel {
- hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(append([]string{b.rootPath}, curPath...)...))
- if err != nil {
- return false, err
- }
- levelWidth = 0
- if hasDBs {
- levelWidth = maxIdx + 1
- }
+ levelWidth = b.blzLeafWidth
}
indices := indexSlice(levelWidth)
- if !isLeafLevel {
- hrw.SortSliceByValue(indices, addressHash(addr, filepath.Join(curPath...)))
- }
+ hrw.SortSliceByValue(indices, addressHash(addr, filepath.Join(curPath...)))
exec := uint64(len(curPath)) == execDepth
@@ -153,16 +134,10 @@ func (b *Blobovniczas) iterateSorted(ctx context.Context, addr *oid.Address, cur
return false, ctx.Err()
default:
}
-
- lastPart := u64ToHexString(indices[i])
- if isLeafLevel {
- lastPart = u64ToHexStringExt(indices[i])
- }
-
if i == 0 {
- curPath = append(curPath, lastPart)
+ curPath = append(curPath, u64ToHexString(indices[i]))
} else {
- curPath[len(curPath)-1] = lastPart
+ curPath[len(curPath)-1] = u64ToHexString(indices[i])
}
if exec {
@@ -181,131 +156,9 @@ func (b *Blobovniczas) iterateSorted(ctx context.Context, addr *oid.Address, cur
return false, nil
}
-// iterateExistingDBPaths iterates over the paths of Blobovniczas without any order.
-//
-// Uses existed blobovnicza files for iteration.
-func (b *Blobovniczas) iterateExistingDBPaths(ctx context.Context, f func(string) (bool, error)) error {
- b.dbFilesGuard.RLock()
- defer b.dbFilesGuard.RUnlock()
-
- _, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return !strings.HasSuffix(path, rebuildSuffix) })
- return err
-}
-
-func (b *Blobovniczas) iterateExistingPathsDFS(ctx context.Context, path string, f func(string) (bool, error), fileFilter func(path string) bool) (bool, error) {
- sysPath := filepath.Join(b.rootPath, path)
- entries, err := os.ReadDir(sysPath)
- if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode
- return false, nil
- }
- if err != nil {
- return false, err
- }
- for _, entry := range entries {
- select {
- case <-ctx.Done():
- return false, ctx.Err()
- default:
- }
- if entry.IsDir() {
- stop, err := b.iterateExistingPathsDFS(ctx, filepath.Join(path, entry.Name()), f, fileFilter)
- if err != nil {
- return false, err
- }
- if stop {
- return true, nil
- }
- } else {
- if !fileFilter(entry.Name()) {
- continue
- }
- stop, err := f(filepath.Join(path, entry.Name()))
- if err != nil {
- return false, err
- }
- if stop {
- return true, nil
- }
- }
- }
- return false, nil
-}
-
-// iterateIncompletedRebuildDBPaths iterates over the paths of Blobovniczas with incompleted rebuild files without any order.
-func (b *Blobovniczas) iterateIncompletedRebuildDBPaths(ctx context.Context, f func(string) (bool, error)) error {
- b.dbFilesGuard.RLock()
- defer b.dbFilesGuard.RUnlock()
-
- _, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return strings.HasSuffix(path, rebuildSuffix) })
- return err
-}
-
-func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Address, f func(string) (bool, error)) error {
- b.dbFilesGuard.RLock()
- defer b.dbFilesGuard.RUnlock()
-
- _, err := b.iterateSordedDBPathsInternal(ctx, "", addr, f)
- return err
-}
-
-func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path string, addr oid.Address, f func(string) (bool, error)) (bool, error) {
- select {
- case <-ctx.Done():
- return false, ctx.Err()
- default:
- }
-
- sysPath := filepath.Join(b.rootPath, path)
- entries, err := os.ReadDir(sysPath)
- if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode
- return false, nil
- }
- if err != nil {
- return false, err
- }
- var dbIdxs []uint64
- var dirIdxs []uint64
-
- for _, entry := range entries {
- if strings.HasSuffix(entry.Name(), rebuildSuffix) {
- continue
- }
- idx := u64FromHexString(entry.Name())
- if entry.IsDir() {
- dirIdxs = append(dirIdxs, idx)
- } else {
- dbIdxs = append(dbIdxs, idx)
- }
- }
-
- if len(dbIdxs) > 0 {
- for _, dbIdx := range dbIdxs {
- dbPath := filepath.Join(path, u64ToHexStringExt(dbIdx))
- stop, err := f(dbPath)
- if err != nil {
- return false, err
- }
- if stop {
- return true, nil
- }
- }
- }
-
- if len(dirIdxs) > 0 {
- hrw.SortSliceByValue(dirIdxs, addressHash(&addr, path))
- for _, dirIdx := range dirIdxs {
- dirPath := filepath.Join(path, u64ToHexString(dirIdx))
- stop, err := b.iterateSordedDBPathsInternal(ctx, dirPath, addr, f)
- if err != nil {
- return false, err
- }
- if stop {
- return true, nil
- }
- }
- }
-
- return false, nil
+// iterator over the paths of Blobovniczas in random order.
+func (b *Blobovniczas) iterateLeaves(ctx context.Context, f func(string) (bool, error)) error {
+ return b.iterateSortedLeaves(ctx, nil, f)
}
// makes slice of uint64 values from 0 to number-1.
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
index 6438f715b..7bc8e2827 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
@@ -1,10 +1,7 @@
package blobovniczatree
import (
- "context"
- "errors"
"fmt"
- "os"
"path/filepath"
"sync"
"sync/atomic"
@@ -15,11 +12,9 @@ import (
"go.uber.org/zap"
)
-var errClosingClosedBlobovnicza = errors.New("closing closed blobovnicza is not allowed")
-
// sharedDB is responsible for opening and closing a file of single blobovnicza.
type sharedDB struct {
- cond *sync.Cond
+ guard *sync.RWMutex
blcza *blobovnicza.Blobovnicza
refCount uint32
@@ -33,12 +28,10 @@ type sharedDB struct {
}
func newSharedDB(options []blobovnicza.Option, path string, readOnly bool,
- metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger,
-) *sharedDB {
+ metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger) *sharedDB {
return &sharedDB{
- cond: &sync.Cond{
- L: &sync.RWMutex{},
- },
+ guard: &sync.RWMutex{},
+
options: options,
path: path,
readOnly: readOnly,
@@ -49,13 +42,13 @@ func newSharedDB(options []blobovnicza.Option, path string, readOnly bool,
}
}
-func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
+func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
if b.closedFlag.Load() {
return nil, errClosed
}
- b.cond.L.Lock()
- defer b.cond.L.Unlock()
+ b.guard.Lock()
+ defer b.guard.Unlock()
if b.refCount > 0 {
b.refCount++
@@ -68,11 +61,11 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
blobovnicza.WithMetrics(b.metrics),
)...)
- if err := blz.Open(ctx); err != nil {
- return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err)
+ if err := blz.Open(); err != nil {
+ return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
}
- if err := blz.Init(ctx); err != nil {
- return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err)
+ if err := blz.Init(); err != nil {
+ return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
}
b.refCount++
@@ -82,22 +75,21 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
return blz, nil
}
-func (b *sharedDB) Close(ctx context.Context) {
- b.cond.L.Lock()
- defer b.cond.L.Unlock()
+func (b *sharedDB) Close() {
+ b.guard.Lock()
+ defer b.guard.Unlock()
if b.refCount == 0 {
- b.log.Error(ctx, logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
- b.cond.Broadcast()
+ b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
return
}
if b.refCount == 1 {
b.refCount = 0
- if err := b.blcza.Close(ctx); err != nil {
- b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+ if err := b.blcza.Close(); err != nil {
+ b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
}
b.blcza = nil
@@ -106,132 +98,57 @@ func (b *sharedDB) Close(ctx context.Context) {
}
b.refCount--
- if b.refCount == 1 {
- b.cond.Broadcast()
- }
}
-func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
- b.cond.L.Lock()
- if b.refCount > 1 {
- b.cond.Wait()
- }
- defer b.cond.L.Unlock()
-
- if b.refCount == 0 {
- return errClosingClosedBlobovnicza
- }
-
- if err := b.blcza.Close(ctx); err != nil {
- b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
- zap.String("id", b.path),
- zap.Error(err),
- )
- return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err)
- }
-
- b.refCount = 0
- b.blcza = nil
- b.openDBCounter.Dec()
-
- return os.Remove(b.path)
-}
-
-func (b *sharedDB) SystemPath() string {
+func (b *sharedDB) Path() string {
return b.path
}
-// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
-type levelDBManager struct {
- dbMtx *sync.RWMutex
- databases map[uint64]*sharedDB
-
- options []blobovnicza.Option
- path string
- readOnly bool
- metrics blobovnicza.Metrics
- openDBCounter *openDBCounter
- closedFlag *atomic.Bool
- log *logger.Logger
+// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
+type levelDbManager struct {
+ databases []*sharedDB
}
-func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string,
- readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger,
-) *levelDBManager {
- result := &levelDBManager{
- databases: make(map[uint64]*sharedDB),
- dbMtx: &sync.RWMutex{},
-
- options: options,
- path: filepath.Join(rootPath, lvlPath),
- readOnly: readOnly,
- metrics: metrics,
- openDBCounter: openDBCounter,
- closedFlag: closedFlag,
- log: log,
+func newLevelDBManager(width uint64, options []blobovnicza.Option, rootPath string, lvlPath string,
+ readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlog *atomic.Bool, log *logger.Logger) *levelDbManager {
+ result := &levelDbManager{
+ databases: make([]*sharedDB, width),
+ }
+ for idx := uint64(0); idx < width; idx++ {
+ result.databases[idx] = newSharedDB(options, filepath.Join(rootPath, lvlPath, u64ToHexString(idx)), readOnly, metrics, openDBCounter, closedFlog, log)
}
return result
}
-func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB {
- res := m.getDBIfExists(idx)
- if res != nil {
- return res
- }
- return m.getOrCreateDB(idx)
-}
-
-func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB {
- m.dbMtx.RLock()
- defer m.dbMtx.RUnlock()
-
+func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
return m.databases[idx]
}
-func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB {
- m.dbMtx.Lock()
- defer m.dbMtx.Unlock()
-
- db := m.databases[idx]
- if db != nil {
- return db
- }
-
- db = newSharedDB(m.options, filepath.Join(m.path, u64ToHexStringExt(idx)), m.readOnly, m.metrics, m.openDBCounter, m.closedFlag, m.log)
- m.databases[idx] = db
- return db
-}
-
-func (m *levelDBManager) hasAnyDB() bool {
- m.dbMtx.RLock()
- defer m.dbMtx.RUnlock()
-
- return len(m.databases) > 0
-}
-
// dbManager manages the opening and closing of blobovnicza instances.
//
// The blobovnicza opens at the first request, closes after the last request.
type dbManager struct {
- levelToManager map[string]*levelDBManager
+ levelToManager map[string]*levelDbManager
levelToManagerGuard *sync.RWMutex
closedFlag *atomic.Bool
dbCounter *openDBCounter
- rootPath string
- options []blobovnicza.Option
- readOnly bool
- metrics blobovnicza.Metrics
- log *logger.Logger
+ rootPath string
+ options []blobovnicza.Option
+ readOnly bool
+ metrics blobovnicza.Metrics
+ leafWidth uint64
+ log *logger.Logger
}
-func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool, metrics blobovnicza.Metrics, log *logger.Logger) *dbManager {
+func newDBManager(rootPath string, options []blobovnicza.Option, leafWidth uint64, readOnly bool, metrics blobovnicza.Metrics, log *logger.Logger) *dbManager {
return &dbManager{
rootPath: rootPath,
options: options,
readOnly: readOnly,
metrics: metrics,
- levelToManager: make(map[string]*levelDBManager),
+ leafWidth: leafWidth,
+ levelToManager: make(map[string]*levelDbManager),
levelToManagerGuard: &sync.RWMutex{},
log: log,
closedFlag: &atomic.Bool{},
@@ -246,17 +163,6 @@ func (m *dbManager) GetByPath(path string) *sharedDB {
return levelManager.GetByIndex(curIndex)
}
-func (m *dbManager) CleanResources(path string) {
- lvlPath := filepath.Dir(path)
-
- m.levelToManagerGuard.Lock()
- defer m.levelToManagerGuard.Unlock()
-
- if result, ok := m.levelToManager[lvlPath]; ok && !result.hasAnyDB() {
- delete(m.levelToManager, lvlPath)
- }
-}
-
func (m *dbManager) Open() {
m.closedFlag.Store(false)
}
@@ -266,7 +172,7 @@ func (m *dbManager) Close() {
m.dbCounter.WaitUntilAllClosed()
}
-func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager {
+func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
result := m.getLevelManagerIfExists(lvlPath)
if result != nil {
return result
@@ -274,14 +180,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager {
return m.getOrCreateLevelManager(lvlPath)
}
-func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager {
+func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager {
m.levelToManagerGuard.RLock()
defer m.levelToManagerGuard.RUnlock()
return m.levelToManager[lvlPath]
}
-func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager {
+func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager {
m.levelToManagerGuard.Lock()
defer m.levelToManagerGuard.Unlock()
@@ -289,7 +195,7 @@ func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager {
return result
}
- result := newLevelDBManager(m.options, m.rootPath, lvlPath, m.readOnly, m.metrics, m.dbCounter, m.closedFlag, m.log)
+ result := newLevelDBManager(m.leafWidth, m.options, m.rootPath, lvlPath, m.readOnly, m.metrics, m.dbCounter, m.closedFlag, m.log)
m.levelToManager[lvlPath] = result
return result
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/metrics.go b/pkg/local_object_storage/blobstor/blobovniczatree/metrics.go
index 68dc7ff38..032d8cf71 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/metrics.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/metrics.go
@@ -4,14 +4,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-)
-
-const (
- rebuildStatusNotStarted = "not_started"
- rebuildStatusRunning = "running"
- rebuildStatusCompleted = "completed"
- rebuildStatusFailed = "failed"
)
type Metrics interface {
@@ -19,14 +11,9 @@ type Metrics interface {
SetParentID(parentID string)
- SetMode(mode.ComponentMode)
+ SetMode(readOnly bool)
Close()
- SetRebuildStatus(status string)
- ObjectMoved(d time.Duration)
- SetRebuildPercent(value uint32)
- ObjectsCount(d time.Duration, success bool)
-
Delete(d time.Duration, success, withStorageID bool)
Exists(d time.Duration, success, withStorageID bool)
GetRange(d time.Duration, size int, success, withStorageID bool)
@@ -38,18 +25,14 @@ type Metrics interface {
type noopMetrics struct{}
func (m *noopMetrics) SetParentID(string) {}
-func (m *noopMetrics) SetMode(mode.ComponentMode) {}
+func (m *noopMetrics) SetMode(bool) {}
func (m *noopMetrics) Close() {}
-func (m *noopMetrics) SetRebuildStatus(string) {}
-func (m *noopMetrics) SetRebuildPercent(uint32) {}
-func (m *noopMetrics) ObjectMoved(time.Duration) {}
func (m *noopMetrics) Delete(time.Duration, bool, bool) {}
func (m *noopMetrics) Exists(time.Duration, bool, bool) {}
func (m *noopMetrics) GetRange(time.Duration, int, bool, bool) {}
func (m *noopMetrics) Get(time.Duration, int, bool, bool) {}
func (m *noopMetrics) Iterate(time.Duration, bool) {}
func (m *noopMetrics) Put(time.Duration, int, bool) {}
-func (m *noopMetrics) ObjectsCount(time.Duration, bool) {}
func (m *noopMetrics) Blobovnicza() blobovnicza.Metrics {
return &blobovnicza.NoopMetrics{}
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
index 5f268b0f2..d11185652 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
@@ -1,9 +1,7 @@
package blobovniczatree
import (
- "context"
"io/fs"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
@@ -12,66 +10,46 @@ import (
)
type cfg struct {
- log *logger.Logger
- perm fs.FileMode
- readOnly bool
- rootPath string
- openedCacheSize int
- blzShallowDepth uint64
- blzShallowWidth uint64
- compression *compression.Compressor
- blzOpts []blobovnicza.Option
- reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors.
- metrics Metrics
- waitBeforeDropDB time.Duration
- blzInitWorkerCount int
- blzMoveBatchSize int
- // TTL for blobovnicza's cache
- openedCacheTTL time.Duration
- // Interval for deletion expired blobovnicza's
- openedCacheExpInterval time.Duration
+ log *logger.Logger
+ perm fs.FileMode
+ readOnly bool
+ rootPath string
+ openedCacheSize int
+ blzShallowDepth uint64
+ blzShallowWidth uint64
+ blzLeafWidth uint64
+ compression *compression.Config
+ blzOpts []blobovnicza.Option
+ // reportError is the function called when encountering disk errors.
+ reportError func(string, error)
+ metrics Metrics
}
type Option func(*cfg)
const (
- defaultPerm = 0o700
- defaultOpenedCacheSize = 50
- defaultOpenedCacheTTL = 0 // means expiring is off
- defaultOpenedCacheInterval = 15 * time.Second
- defaultBlzShallowDepth = 2
- defaultBlzShallowWidth = 16
- defaultWaitBeforeDropDB = 10 * time.Second
- defaultBlzInitWorkerCount = 5
- defaulBlzMoveBatchSize = 10000
+ defaultPerm = 0700
+ defaultOpenedCacheSize = 50
+ defaultBlzShallowDepth = 2
+ defaultBlzShallowWidth = 16
)
func initConfig(c *cfg) {
*c = cfg{
- log: logger.NewLoggerWrapper(zap.L()),
- perm: defaultPerm,
- openedCacheSize: defaultOpenedCacheSize,
- openedCacheTTL: defaultOpenedCacheTTL,
- openedCacheExpInterval: defaultOpenedCacheInterval,
- blzShallowDepth: defaultBlzShallowDepth,
- blzShallowWidth: defaultBlzShallowWidth,
- reportError: func(context.Context, string, error) {},
- metrics: &noopMetrics{},
- waitBeforeDropDB: defaultWaitBeforeDropDB,
- blzInitWorkerCount: defaultBlzInitWorkerCount,
- blzMoveBatchSize: defaulBlzMoveBatchSize,
+ log: &logger.Logger{Logger: zap.L()},
+ perm: defaultPerm,
+ openedCacheSize: defaultOpenedCacheSize,
+ blzShallowDepth: defaultBlzShallowDepth,
+ blzShallowWidth: defaultBlzShallowWidth,
+ reportError: func(string, error) {},
+ metrics: &noopMetrics{},
}
}
-func WithBlobovniczaTreeLogger(log *logger.Logger) Option {
+func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = log
- }
-}
-
-func WithBlobovniczaLogger(log *logger.Logger) Option {
- return func(c *cfg) {
- c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log))
+ c.log = l
+ c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(l))
}
}
@@ -87,6 +65,12 @@ func WithBlobovniczaShallowWidth(width uint64) Option {
}
}
+func WithBlobovniczaLeafWidth(w uint64) Option {
+ return func(c *cfg) {
+ c.blzLeafWidth = w
+ }
+}
+
func WithBlobovniczaShallowDepth(depth uint64) Option {
return func(c *cfg) {
c.blzShallowDepth = depth
@@ -111,18 +95,6 @@ func WithOpenedCacheSize(sz int) Option {
}
}
-func WithOpenedCacheTTL(ttl time.Duration) Option {
- return func(c *cfg) {
- c.openedCacheTTL = ttl
- }
-}
-
-func WithOpenedCacheExpInterval(expInterval time.Duration) Option {
- return func(c *cfg) {
- c.openedCacheExpInterval = expInterval
- }
-}
-
func WithObjectSizeLimit(sz uint64) Option {
return func(c *cfg) {
c.blzOpts = append(c.blzOpts, blobovnicza.WithObjectSizeLimit(sz))
@@ -134,27 +106,3 @@ func WithMetrics(m Metrics) Option {
c.metrics = m
}
}
-
-func WithWaitBeforeDropDB(t time.Duration) Option {
- return func(c *cfg) {
- c.waitBeforeDropDB = t
- }
-}
-
-func WithMoveBatchSize(v int) Option {
- return func(c *cfg) {
- c.blzMoveBatchSize = v
- }
-}
-
-// WithInitWorkerCount sets maximum workers count to init blobovnicza tree.
-//
-// Negative or zero value means no limit.
-func WithInitWorkerCount(v int) Option {
- if v <= 0 {
- v = -1
- }
- return func(c *cfg) {
- c.blzInitWorkerCount = v
- }
-}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
index 37c49d741..6f9c8c0de 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
@@ -2,13 +2,13 @@ package blobovniczatree
import (
"context"
- "errors"
"path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -70,49 +70,50 @@ func (b *Blobovniczas) Put(ctx context.Context, prm common.PutPrm) (common.PutRe
type putIterator struct {
B *Blobovniczas
- ID *ID
+ ID *blobovnicza.ID
AllFull bool
PutPrm blobovnicza.PutPrm
}
func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) {
- active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
+ active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
if err != nil {
if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
- i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
- zap.Error(err))
+ i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return false, nil
}
if active == nil {
- i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
+ i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return false, nil
}
- defer active.Close(ctx)
+ defer active.Close()
i.AllFull = false
_, err = active.Blobovnicza().Put(ctx, i.PutPrm)
if err != nil {
if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
- i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
- zap.String("path", active.SystemPath()),
- zap.Error(err))
- }
- if errors.Is(err, blobovnicza.ErrNoSpace) {
- i.AllFull = true
+ i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
+ zap.String("path", active.Path()),
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
+
return false, nil
}
- idx := u64FromHexString(filepath.Base(active.SystemPath()))
- i.ID = NewIDFromBytes([]byte(filepath.Join(lvlPath, u64ToHexString(idx))))
+ idx := u64FromHexString(filepath.Base(active.Path()))
+ i.ID = blobovnicza.NewIDFromBytes([]byte(filepath.Join(lvlPath, u64ToHexString(idx))))
return true, nil
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
deleted file mode 100644
index a840275b8..000000000
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ /dev/null
@@ -1,618 +0,0 @@
-package blobovniczatree
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
- "golang.org/x/sync/errgroup"
-)
-
-const rebuildSuffix = ".rebuild"
-
-var (
- errRebuildInProgress = errors.New("rebuild is in progress, the operation cannot be performed")
- errBatchFull = errors.New("batch full")
-)
-
-func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (common.RebuildRes, error) {
- if b.readOnly {
- return common.RebuildRes{}, common.ErrReadOnly
- }
-
- b.metrics.SetRebuildStatus(rebuildStatusRunning)
- b.metrics.SetRebuildPercent(0)
- success := true
- defer func() {
- if success {
- b.metrics.SetRebuildStatus(rebuildStatusCompleted)
- } else {
- b.metrics.SetRebuildStatus(rebuildStatusFailed)
- }
- }()
-
- b.rebuildGuard.Lock()
- defer b.rebuildGuard.Unlock()
-
- var res common.RebuildRes
-
- b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild)
- completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter)
- res.ObjectsMoved += completedPreviosMoves
- if err != nil {
- b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
- success = false
- return res, err
- }
- b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
-
- b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild)
- dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent)
- if err != nil {
- b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
- success = false
- return res, err
- }
-
- b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
- res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res)
- if err != nil {
- success = false
- }
- return res, err
-}
-
-func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) {
- var completedDBCount uint32
- for _, db := range dbs {
- b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
- movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter)
- res.ObjectsMoved += movedObjects
- if err != nil {
- b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
- return res, err
- }
- b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
- res.FilesRemoved++
- completedDBCount++
- b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs)))
- }
- b.metrics.SetRebuildPercent(100)
- return res, nil
-}
-
-func (b *Blobovniczas) getDBsToRebuild(ctx context.Context, fillPercent int) ([]string, error) {
- withSchemaChange, err := b.selectDBsDoNotMatchSchema(ctx)
- if err != nil {
- return nil, err
- }
- withFillPercent, err := b.selectDBsDoNotMatchFillPercent(ctx, fillPercent)
- if err != nil {
- return nil, err
- }
- for k := range withFillPercent {
- withSchemaChange[k] = struct{}{}
- }
- result := make([]string, 0, len(withSchemaChange))
- for db := range withSchemaChange {
- result = append(result, db)
- }
- return result, nil
-}
-
-func (b *Blobovniczas) selectDBsDoNotMatchSchema(ctx context.Context) (map[string]struct{}, error) {
- dbsToMigrate := make(map[string]struct{})
- if err := b.iterateExistingDBPaths(ctx, func(s string) (bool, error) {
- dbsToMigrate[s] = struct{}{}
- return false, nil
- }); err != nil {
- return nil, err
- }
- if err := b.iterateSortedLeaves(ctx, nil, func(s string) (bool, error) {
- delete(dbsToMigrate, s)
- return false, nil
- }); err != nil {
- return nil, err
- }
- return dbsToMigrate, nil
-}
-
-func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, target int) (map[string]struct{}, error) {
- if target <= 0 || target > 100 {
- return nil, fmt.Errorf("invalid fill percent value %d: must be (0; 100]", target)
- }
- result := make(map[string]struct{})
- if err := b.iterateDeepest(ctx, oid.Address{}, func(lvlPath string) (bool, error) {
- dir := filepath.Join(b.rootPath, lvlPath)
- entries, err := os.ReadDir(dir)
- if os.IsNotExist(err) { // non initialized tree
- return false, nil
- }
- if err != nil {
- return false, err
- }
- hasDBs := false
- // db with maxIdx could be an active, so it should not be rebuilded
- var maxIdx uint64
- for _, e := range entries {
- if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
- continue
- }
- hasDBs = true
- maxIdx = max(u64FromHexString(e.Name()), maxIdx)
- }
- if !hasDBs {
- return false, nil
- }
- for _, e := range entries {
- if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
- continue
- }
- if u64FromHexString(e.Name()) == maxIdx {
- continue
- }
- path := filepath.Join(lvlPath, e.Name())
- resettlementRequired, err := b.rebuildBySize(ctx, path, target)
- if err != nil {
- return false, err
- }
- if resettlementRequired {
- result[path] = struct{}{}
- }
- }
- return false, nil
- }); err != nil {
- return nil, err
- }
- return result, nil
-}
-
-func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFillPercent int) (bool, error) {
- shDB := b.getBlobovnicza(ctx, path)
- blz, err := shDB.Open(ctx)
- if err != nil {
- return false, err
- }
- defer shDB.Close(ctx)
- fp := blz.FillPercent()
- // accepted fill percent defines as
- // |----|+++++++++++++++++|+++++++++++++++++|---------------
- // 0% target 100% 100+(100 - target)
- // where `+` - accepted fill percent, `-` - not accepted fill percent
- return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil
-}
-
-func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) {
- shDB := b.getBlobovnicza(ctx, path)
- blz, err := shDB.Open(ctx)
- if err != nil {
- return 0, err
- }
- shDBClosed := false
- defer func() {
- if shDBClosed {
- return
- }
- shDB.Close(ctx)
- }()
- dropTempFile, err := b.addRebuildTempFile(ctx, path)
- if err != nil {
- return 0, err
- }
- migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter)
- if err != nil {
- return migratedObjects, err
- }
- shDBClosed, err = b.dropDB(ctx, path, shDB)
- if err == nil {
- // drop only on success to continue rebuild on error
- dropTempFile()
- }
- return migratedObjects, err
-}
-
-func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) {
- sysPath := filepath.Join(b.rootPath, path)
- sysPath += rebuildSuffix
- _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm)
- if err != nil {
- return nil, err
- }
- return func() {
- if err := os.Remove(sysPath); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
- }
- }, nil
-}
-
-func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) {
- var result atomic.Uint64
- batch := make(map[oid.Address][]byte)
-
- var prm blobovnicza.IteratePrm
- prm.DecodeAddresses()
- prm.SetHandler(func(ie blobovnicza.IterationElement) error {
- batch[ie.Address()] = bytes.Clone(ie.ObjectData())
- if len(batch) == b.blzMoveBatchSize {
- return errBatchFull
- }
- return nil
- })
-
- for {
- release, err := limiter.ReadRequest(ctx)
- if err != nil {
- return result.Load(), err
- }
- _, err = blz.Iterate(ctx, prm)
- release()
- if err != nil && !errors.Is(err, errBatchFull) {
- return result.Load(), err
- }
-
- if len(batch) == 0 {
- break
- }
-
- eg, egCtx := errgroup.WithContext(ctx)
-
- for addr, data := range batch {
- release, err := limiter.AcquireWorkSlot(egCtx)
- if err != nil {
- _ = eg.Wait()
- return result.Load(), err
- }
- eg.Go(func() error {
- defer release()
- moveRelease, err := limiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- err = b.moveObject(egCtx, blz, blzPath, addr, data, meta)
- moveRelease()
- if err == nil {
- result.Add(1)
- }
- return err
- })
- }
- if err := eg.Wait(); err != nil {
- return result.Load(), err
- }
-
- batch = make(map[oid.Address][]byte)
- }
-
- return result.Load(), nil
-}
-
-func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string,
- addr oid.Address, data []byte, metaStore common.MetaStorage,
-) error {
- startedAt := time.Now()
- defer func() {
- b.metrics.ObjectMoved(time.Since(startedAt))
- }()
- it := &moveIterator{
- B: b,
- ID: nil,
- AllFull: true,
- Address: addr,
- ObjectData: data,
- MetaStore: metaStore,
- Source: source,
- SourceSysPath: sourcePath,
- }
-
- if err := b.iterateDeepest(ctx, addr, func(lvlPath string) (bool, error) { return it.tryMoveToLvl(ctx, lvlPath) }); err != nil {
- return err
- } else if it.ID == nil {
- if it.AllFull {
- return common.ErrNoSpace
- }
- return errPutFailed
- }
- return nil
-}
-
-func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) {
- select {
- case <-ctx.Done():
- return false, ctx.Err()
- case <-time.After(b.waitBeforeDropDB): // to complete requests with old storage ID
- }
-
- b.dbCache.EvictAndMarkNonCached(path)
- defer b.dbCache.RemoveFromNonCached(path)
-
- b.dbFilesGuard.Lock()
- defer b.dbFilesGuard.Unlock()
-
- if err := shDB.CloseAndRemoveFile(ctx); err != nil {
- return false, err
- }
- b.commondbManager.CleanResources(path)
- if err := b.dropDirectoryIfEmpty(filepath.Dir(path)); err != nil {
- return true, err
- }
- return true, nil
-}
-
-func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error {
- if path == "." {
- return nil
- }
-
- sysPath := filepath.Join(b.rootPath, path)
- entries, err := os.ReadDir(sysPath)
- if err != nil {
- return err
- }
- if len(entries) > 0 {
- return nil
- }
- if err := os.Remove(sysPath); err != nil {
- return err
- }
- return b.dropDirectoryIfEmpty(filepath.Dir(path))
-}
-
-func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) {
- var count uint64
- var rebuildTempFilesToRemove []string
- err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
- rebuildTmpFilePath := s
- s = strings.TrimSuffix(s, rebuildSuffix)
- shDB := b.getBlobovnicza(ctx, s)
- blz, err := shDB.Open(ctx)
- if err != nil {
- return true, err
- }
- defer shDB.Close(ctx)
-
- release, err := rateLimiter.ReadRequest(ctx)
- if err != nil {
- return false, err
- }
- incompletedMoves, err := blz.ListMoveInfo(ctx)
- release()
- if err != nil {
- return true, err
- }
-
- for _, move := range incompletedMoves {
- release, err := rateLimiter.WriteRequest(ctx)
- if err != nil {
- return false, err
- }
- err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore)
- release()
- if err != nil {
- return true, err
- }
- count++
- }
-
- rebuildTempFilesToRemove = append(rebuildTempFilesToRemove, rebuildTmpFilePath)
- return false, nil
- })
- for _, tmp := range rebuildTempFilesToRemove {
- release, err := rateLimiter.WriteRequest(ctx)
- if err != nil {
- return count, err
- }
- if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
- }
- release()
- }
- return count, err
-}
-
-func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string,
- move blobovnicza.MoveInfo, metaStore common.MetaStorage,
-) error {
- targetDB := b.getBlobovnicza(ctx, NewIDFromBytes(move.TargetStorageID).Path())
- target, err := targetDB.Open(ctx)
- if err != nil {
- return err
- }
- defer targetDB.Close(ctx)
-
- existsInSource := true
- var gPrm blobovnicza.GetPrm
- gPrm.SetAddress(move.Address)
- gRes, err := source.Get(ctx, gPrm)
- if err != nil {
- if client.IsErrObjectNotFound(err) {
- existsInSource = false
- } else {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
- return err
- }
- }
-
- if !existsInSource { // object was deleted by Rebuild, need to delete move info
- if err = source.DropMoveInfo(ctx, move.Address); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
- return err
- }
- b.deleteProtectedObjects.Delete(move.Address)
- return nil
- }
-
- existsInTarget, err := target.Exists(ctx, move.Address)
- if err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
- return err
- }
-
- if !existsInTarget {
- var putPrm blobovnicza.PutPrm
- putPrm.SetAddress(move.Address)
- putPrm.SetMarshaledObject(gRes.Object())
- _, err = target.Put(ctx, putPrm)
- if err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
- return err
- }
- }
-
- if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
- return err
- }
-
- var deletePrm blobovnicza.DeletePrm
- deletePrm.SetAddress(move.Address)
- if _, err = source.Delete(ctx, deletePrm); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
- return err
- }
-
- if err = source.DropMoveInfo(ctx, move.Address); err != nil {
- b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
- return err
- }
-
- b.deleteProtectedObjects.Delete(move.Address)
- return nil
-}
-
-type moveIterator struct {
- B *Blobovniczas
- ID *ID
- AllFull bool
- Address oid.Address
- ObjectData []byte
- MetaStore common.MetaStorage
- Source *blobovnicza.Blobovnicza
- SourceSysPath string
-}
-
-func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, error) {
- target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
- if err != nil {
- if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
- } else {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
- }
- return false, nil
- }
-
- if target == nil {
- i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
- return false, nil
- }
- defer target.Close(ctx)
-
- i.AllFull = false
-
- targetIDx := u64FromHexString(filepath.Base(target.SystemPath()))
- targetStorageID := NewIDFromBytes([]byte(filepath.Join(lvlPath, u64ToHexString(targetIDx))))
-
- if err = i.Source.PutMoveInfo(ctx, blobovnicza.MoveInfo{
- Address: i.Address,
- TargetStorageID: targetStorageID.Bytes(),
- }); err != nil {
- if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
- } else {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
- }
- return true, nil
- }
- i.B.deleteProtectedObjects.Add(i.Address)
-
- var putPrm blobovnicza.PutPrm
- putPrm.SetAddress(i.Address)
- putPrm.SetMarshaledObject(i.ObjectData)
- putPrm.SetForce(true)
-
- _, err = target.Blobovnicza().Put(ctx, putPrm)
- if err != nil {
- if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
- } else {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
- }
- return true, nil
- }
-
- if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
- return true, nil
- }
-
- var deletePrm blobovnicza.DeletePrm
- deletePrm.SetAddress(i.Address)
- if _, err = i.Source.Delete(ctx, deletePrm); err != nil {
- if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err)
- } else {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
- }
- return true, nil
- }
-
- if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil {
- if !isLogical(err) {
- i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err)
- } else {
- i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
- }
- return true, nil
- }
- i.B.deleteProtectedObjects.Delete(i.Address)
-
- i.ID = targetStorageID
- return true, nil
-}
-
-type addressMap struct {
- data map[oid.Address]struct{}
- guard *sync.RWMutex
-}
-
-func newAddressMap() *addressMap {
- return &addressMap{
- data: make(map[oid.Address]struct{}),
- guard: &sync.RWMutex{},
- }
-}
-
-func (m *addressMap) Add(address oid.Address) {
- m.guard.Lock()
- defer m.guard.Unlock()
-
- m.data[address] = struct{}{}
-}
-
-func (m *addressMap) Delete(address oid.Address) {
- m.guard.Lock()
- defer m.guard.Unlock()
-
- delete(m.data, address)
-}
-
-func (m *addressMap) Contains(address oid.Address) bool {
- m.guard.RLock()
- defer m.guard.RUnlock()
-
- _, contains := m.data[address]
- return contains
-}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
deleted file mode 100644
index 4146ef260..000000000
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package blobovniczatree
-
-import (
- "bytes"
- "context"
- "os"
- "path/filepath"
- "sync"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/stretchr/testify/require"
-)
-
-func TestRebuildFailover(t *testing.T) {
- t.Parallel()
-
- t.Run("only move info saved", testRebuildFailoverOnlyMoveInfoSaved)
-
- t.Run("object saved to target", testRebuildFailoverObjectSavedToTarget)
-
- t.Run("object deleted from source", testRebuildFailoverObjectDeletedFromSource)
-}
-
-func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
- t.Parallel()
- dir := t.TempDir()
-
- blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
-
- obj := blobstortest.NewObject(1024)
- data, err := obj.Marshal()
- require.NoError(t, err)
-
- var pPrm blobovnicza.PutPrm
- pPrm.SetAddress(object.AddressOf(obj))
- pPrm.SetMarshaledObject(data)
- _, err = blz.Put(context.Background(), pPrm)
- require.NoError(t, err)
-
- require.NoError(t, blz.PutMoveInfo(context.Background(), blobovnicza.MoveInfo{
- Address: object.AddressOf(obj),
- TargetStorageID: []byte("0/0/0"),
- }))
-
- require.NoError(t, blz.Close(context.Background()))
- _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
- require.NoError(t, err)
-
- testRebuildFailoverValidate(t, dir, obj, true)
-}
-
-func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
- t.Parallel()
- dir := t.TempDir()
-
- blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
-
- obj := blobstortest.NewObject(1024)
- data, err := obj.Marshal()
- require.NoError(t, err)
-
- var pPrm blobovnicza.PutPrm
- pPrm.SetAddress(object.AddressOf(obj))
- pPrm.SetMarshaledObject(data)
- _, err = blz.Put(context.Background(), pPrm)
- require.NoError(t, err)
-
- require.NoError(t, blz.PutMoveInfo(context.Background(), blobovnicza.MoveInfo{
- Address: object.AddressOf(obj),
- TargetStorageID: []byte("0/0/0"),
- }))
-
- require.NoError(t, blz.Close(context.Background()))
-
- _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
- require.NoError(t, err)
-
- blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
-
- _, err = blz.Put(context.Background(), pPrm)
- require.NoError(t, err)
-
- require.NoError(t, blz.Close(context.Background()))
-
- testRebuildFailoverValidate(t, dir, obj, true)
-}
-
-func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
- t.Parallel()
- dir := t.TempDir()
-
- blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
-
- obj := blobstortest.NewObject(1024)
- data, err := obj.Marshal()
- require.NoError(t, err)
-
- require.NoError(t, blz.PutMoveInfo(context.Background(), blobovnicza.MoveInfo{
- Address: object.AddressOf(obj),
- TargetStorageID: []byte("0/0/0"),
- }))
-
- require.NoError(t, blz.Close(context.Background()))
-
- _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
- require.NoError(t, err)
-
- blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
-
- var pPrm blobovnicza.PutPrm
- pPrm.SetAddress(object.AddressOf(obj))
- pPrm.SetMarshaledObject(data)
- _, err = blz.Put(context.Background(), pPrm)
- require.NoError(t, err)
-
- require.NoError(t, blz.Close(context.Background()))
-
- testRebuildFailoverValidate(t, dir, obj, false)
-}
-
-func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) {
- b := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
- WithObjectSizeLimit(2048),
- WithBlobovniczaShallowWidth(2),
- WithBlobovniczaShallowDepth(2),
- WithRootPath(dir),
- WithBlobovniczaSize(10*1024),
- WithWaitBeforeDropDB(0),
- WithOpenedCacheSize(1000))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
- require.NoError(t, b.Init())
-
- var dPrm common.DeletePrm
- dPrm.Address = object.AddressOf(obj)
- dPrm.StorageID = []byte("0/0/1")
- _, err := b.Delete(context.Background(), dPrm)
- require.ErrorIs(t, err, errObjectIsDeleteProtected)
-
- metaStub := &storageIDUpdateStub{
- storageIDs: make(map[oid.Address][]byte),
- guard: &sync.Mutex{},
- }
- limiter := &rebuildLimiterStub{}
- rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- Limiter: limiter,
- FillPercent: 1,
- })
- require.NoError(t, err)
- require.Equal(t, uint64(1), rRes.ObjectsMoved)
- require.Equal(t, uint64(0), rRes.FilesRemoved)
-
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
-
- blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
-
- moveInfo, err := blz.ListMoveInfo(context.Background())
- require.NoError(t, err)
- require.Equal(t, 0, len(moveInfo))
-
- var gPrm blobovnicza.GetPrm
- gPrm.SetAddress(object.AddressOf(obj))
- _, err = blz.Get(context.Background(), gPrm)
- require.True(t, client.IsErrObjectNotFound(err))
-
- require.NoError(t, blz.Close(context.Background()))
-
- blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
- require.NoError(t, blz.Open(context.Background()))
- require.NoError(t, blz.Init(context.Background()))
-
- moveInfo, err = blz.ListMoveInfo(context.Background())
- require.NoError(t, err)
- require.Equal(t, 0, len(moveInfo))
-
- gRes, err := blz.Get(context.Background(), gPrm)
- require.NoError(t, err)
- require.True(t, len(gRes.Object()) > 0)
-
- if mustUpdateStorageID {
- require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)]))
- }
-
- require.NoError(t, blz.Close(context.Background()))
-
- _, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild"))
- require.True(t, os.IsNotExist(err))
-}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
deleted file mode 100644
index a7a99fec3..000000000
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ /dev/null
@@ -1,520 +0,0 @@
-package blobovniczatree
-
-import (
- "context"
- "fmt"
- "sync"
- "sync/atomic"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/stretchr/testify/require"
- "golang.org/x/sync/errgroup"
-)
-
-func TestBlobovniczaTreeSchemaRebuild(t *testing.T) {
- t.Parallel()
-
- t.Run("width increased", func(t *testing.T) {
- t.Parallel()
- testBlobovniczaTreeRebuildHelper(t, 2, 2, 2, 3, false)
- })
-
- t.Run("width reduced", func(t *testing.T) {
- t.Parallel()
- testBlobovniczaTreeRebuildHelper(t, 2, 2, 2, 1, true)
- })
-
- t.Run("depth increased", func(t *testing.T) {
- t.Parallel()
- testBlobovniczaTreeRebuildHelper(t, 1, 2, 2, 2, true)
- })
-
- t.Run("depth reduced", func(t *testing.T) {
- t.Parallel()
- testBlobovniczaTreeRebuildHelper(t, 2, 2, 1, 2, true)
- })
-}
-
-func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
- t.Parallel()
-
- t.Run("no rebuild by fill percent", func(t *testing.T) {
- t.Parallel()
-
- dir := t.TempDir()
- b := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
- WithObjectSizeLimit(64*1024),
- WithBlobovniczaShallowWidth(1), // single directory
- WithBlobovniczaShallowDepth(1),
- WithRootPath(dir),
- WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
- WithWaitBeforeDropDB(0),
- WithOpenedCacheSize(1000),
- WithMoveBatchSize(3))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
- require.NoError(t, b.Init())
-
- storageIDs := make(map[oid.Address][]byte)
- for range 100 {
- obj := blobstortest.NewObject(64 * 1024) // 64KB object
- data, err := obj.Marshal()
- require.NoError(t, err)
- var prm common.PutPrm
- prm.Address = object.AddressOf(obj)
- prm.RawData = data
- res, err := b.Put(context.Background(), prm)
- require.NoError(t, err)
- storageIDs[prm.Address] = res.StorageID
- }
- metaStub := &storageIDUpdateStub{
- storageIDs: storageIDs,
- guard: &sync.Mutex{},
- }
- limiter := &rebuildLimiterStub{}
- rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- Limiter: limiter,
- FillPercent: 60,
- })
- require.NoError(t, err)
- dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
- require.False(t, dataMigrated)
-
- for addr, storageID := range storageIDs {
- var gPrm common.GetPrm
- gPrm.Address = addr
- gPrm.StorageID = storageID
- _, err := b.Get(context.Background(), gPrm)
- require.NoError(t, err)
- }
-
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
- })
-
- t.Run("no rebuild single db", func(t *testing.T) {
- t.Parallel()
-
- dir := t.TempDir()
- b := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
- WithObjectSizeLimit(64*1024),
- WithBlobovniczaShallowWidth(1), // single directory
- WithBlobovniczaShallowDepth(1),
- WithRootPath(dir),
- WithBlobovniczaSize(100*1024), // 100 KB soft limit for each blobovnicza
- WithWaitBeforeDropDB(0),
- WithOpenedCacheSize(1000),
- WithMoveBatchSize(3))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
- require.NoError(t, b.Init())
-
- storageIDs := make(map[oid.Address][]byte)
- obj := blobstortest.NewObject(64 * 1024) // 64KB object
- data, err := obj.Marshal()
- require.NoError(t, err)
- var prm common.PutPrm
- prm.Address = object.AddressOf(obj)
- prm.RawData = data
- res, err := b.Put(context.Background(), prm)
- require.NoError(t, err)
- storageIDs[prm.Address] = res.StorageID
- metaStub := &storageIDUpdateStub{
- storageIDs: storageIDs,
- guard: &sync.Mutex{},
- }
- limiter := &rebuildLimiterStub{}
- rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- Limiter: limiter,
- FillPercent: 90, // 64KB / 100KB = 64%
- })
- require.NoError(t, err)
- dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
- require.False(t, dataMigrated)
-
- for addr, storageID := range storageIDs {
- var gPrm common.GetPrm
- gPrm.Address = addr
- gPrm.StorageID = storageID
- _, err := b.Get(context.Background(), gPrm)
- require.NoError(t, err)
- }
-
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
- })
-
- t.Run("rebuild by fill percent", func(t *testing.T) {
- t.Parallel()
-
- dir := t.TempDir()
- b := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
- WithObjectSizeLimit(64*1024),
- WithBlobovniczaShallowWidth(1), // single directory
- WithBlobovniczaShallowDepth(1),
- WithRootPath(dir),
- WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
- WithWaitBeforeDropDB(0),
- WithOpenedCacheSize(1000),
- WithMoveBatchSize(3))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
- require.NoError(t, b.Init())
-
- storageIDs := make(map[oid.Address][]byte)
- toDelete := make(map[oid.Address][]byte)
- for i := range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
- obj := blobstortest.NewObject(64 * 1024)
- data, err := obj.Marshal()
- require.NoError(t, err)
- var prm common.PutPrm
- prm.Address = object.AddressOf(obj)
- prm.RawData = data
- res, err := b.Put(context.Background(), prm)
- require.NoError(t, err)
- storageIDs[prm.Address] = res.StorageID
- if i%2 == 1 {
- toDelete[prm.Address] = res.StorageID
- }
- }
- for addr, storageID := range toDelete {
- var prm common.DeletePrm
- prm.Address = addr
- prm.StorageID = storageID
- _, err := b.Delete(context.Background(), prm)
- require.NoError(t, err)
- }
- metaStub := &storageIDUpdateStub{
- storageIDs: storageIDs,
- guard: &sync.Mutex{},
- }
- limiter := &rebuildLimiterStub{}
- rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- Limiter: limiter,
- FillPercent: 80,
- })
- require.NoError(t, err)
- require.Equal(t, uint64(49), rRes.FilesRemoved)
- require.Equal(t, uint64(49), rRes.ObjectsMoved) // 49 DBs with 1 objects
- require.Equal(t, uint64(49), metaStub.updatedCount)
-
- for addr, storageID := range storageIDs {
- if _, found := toDelete[addr]; found {
- continue
- }
- var gPrm common.GetPrm
- gPrm.Address = addr
- gPrm.StorageID = storageID
- _, err := b.Get(context.Background(), gPrm)
- require.NoError(t, err)
- }
-
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
- })
-
- t.Run("rebuild by overflow", func(t *testing.T) {
- t.Parallel()
-
- dir := t.TempDir()
- b := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
- WithObjectSizeLimit(64*1024),
- WithBlobovniczaShallowWidth(1), // single directory
- WithBlobovniczaShallowDepth(1),
- WithRootPath(dir),
- WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
- WithWaitBeforeDropDB(0),
- WithOpenedCacheSize(1000),
- WithMoveBatchSize(3))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
- require.NoError(t, b.Init())
-
- storageIDs := make(map[oid.Address][]byte)
- for range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
- obj := blobstortest.NewObject(64 * 1024)
- data, err := obj.Marshal()
- require.NoError(t, err)
- var prm common.PutPrm
- prm.Address = object.AddressOf(obj)
- prm.RawData = data
- res, err := b.Put(context.Background(), prm)
- require.NoError(t, err)
- storageIDs[prm.Address] = res.StorageID
- }
- metaStub := &storageIDUpdateStub{
- storageIDs: storageIDs,
- guard: &sync.Mutex{},
- }
- require.NoError(t, b.Close(context.Background()))
- b = NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
- WithObjectSizeLimit(64*1024),
- WithBlobovniczaShallowWidth(1),
- WithBlobovniczaShallowDepth(1),
- WithRootPath(dir),
- WithBlobovniczaSize(50*1024), // 50 KB limit for each blobovnicza
- WithWaitBeforeDropDB(0),
- WithOpenedCacheSize(1000),
- WithMoveBatchSize(3))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
- require.NoError(t, b.Init())
-
- limiter := &rebuildLimiterStub{}
- rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
- MetaStorage: metaStub,
- Limiter: limiter,
- FillPercent: 80,
- })
- require.NoError(t, err)
- require.Equal(t, uint64(49), rRes.FilesRemoved)
- require.Equal(t, uint64(98), rRes.ObjectsMoved) // 49 DBs with 2 objects
- require.Equal(t, uint64(98), metaStub.updatedCount)
-
- for addr, storageID := range storageIDs {
- var gPrm common.GetPrm
- gPrm.Address = addr
- gPrm.StorageID = storageID
- _, err := b.Get(context.Background(), gPrm)
- require.NoError(t, err)
- }
-
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
- })
-}
-
-func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
- t.Parallel()
-
- dir := t.TempDir()
- b := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
- WithObjectSizeLimit(64*1024), // 64KB object size limit
- WithBlobovniczaShallowWidth(5),
- WithBlobovniczaShallowDepth(2), // depth = 2
- WithRootPath(dir),
- WithBlobovniczaSize(100*1024),
- WithWaitBeforeDropDB(0),
- WithOpenedCacheSize(1000),
- WithMoveBatchSize(3))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
- require.NoError(t, b.Init())
-
- obj := blobstortest.NewObject(64 * 1024) // 64KB object
- data, err := obj.Marshal()
- require.NoError(t, err)
- var prm common.PutPrm
- prm.Address = object.AddressOf(obj)
- prm.RawData = data
- res, err := b.Put(context.Background(), prm)
- require.NoError(t, err)
-
- storageIDs := make(map[oid.Address][]byte)
- storageIDs[prm.Address] = res.StorageID
-
- require.NoError(t, b.Close(context.Background()))
-
- b = NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
- WithObjectSizeLimit(32*1024), // 32KB object size limit
- WithBlobovniczaShallowWidth(5),
- WithBlobovniczaShallowDepth(3), // depth = 3
- WithRootPath(dir),
- WithBlobovniczaSize(100*1024),
- WithWaitBeforeDropDB(0),
- WithOpenedCacheSize(1000),
- WithMoveBatchSize(3))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
- require.NoError(t, b.Init())
-
- metaStub := &storageIDUpdateStub{
- storageIDs: storageIDs,
- guard: &sync.Mutex{},
- }
- limiter := &rebuildLimiterStub{}
- var rPrm common.RebuildPrm
- rPrm.MetaStorage = metaStub
- rPrm.Limiter = limiter
- rPrm.FillPercent = 1
- rRes, err := b.Rebuild(context.Background(), rPrm)
- require.NoError(t, err)
- dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
- require.True(t, dataMigrated)
-
- for addr, storageID := range storageIDs {
- var gPrm common.GetPrm
- gPrm.Address = addr
- gPrm.StorageID = storageID
- _, err := b.Get(context.Background(), gPrm)
- require.NoError(t, err)
- }
-
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
-}
-
-func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) {
- dir := t.TempDir()
- b := NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
- WithObjectSizeLimit(2048),
- WithBlobovniczaShallowWidth(sourceWidth),
- WithBlobovniczaShallowDepth(sourceDepth),
- WithRootPath(dir),
- WithBlobovniczaSize(100*1024),
- WithWaitBeforeDropDB(0),
- WithOpenedCacheSize(1000),
- WithMoveBatchSize(3))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
- require.NoError(t, b.Init())
-
- eg, egCtx := errgroup.WithContext(context.Background())
- storageIDs := make(map[oid.Address][]byte)
- storageIDsGuard := &sync.Mutex{}
- for range 100 {
- eg.Go(func() error {
- obj := blobstortest.NewObject(1024)
- data, err := obj.Marshal()
- if err != nil {
- return err
- }
- var prm common.PutPrm
- prm.Address = object.AddressOf(obj)
- prm.RawData = data
- res, err := b.Put(egCtx, prm)
- if err != nil {
- return err
- }
- storageIDsGuard.Lock()
- storageIDs[prm.Address] = res.StorageID
- storageIDsGuard.Unlock()
- return nil
- })
- }
-
- require.NoError(t, eg.Wait())
- require.NoError(t, b.Close(context.Background()))
-
- b = NewBlobovniczaTree(
- context.Background(),
- WithBlobovniczaLogger(test.NewLogger(t)),
- WithBlobovniczaTreeLogger(test.NewLogger(t)),
- WithObjectSizeLimit(2048),
- WithBlobovniczaShallowWidth(targetWidth),
- WithBlobovniczaShallowDepth(targetDepth),
- WithRootPath(dir),
- WithBlobovniczaSize(100*1024),
- WithWaitBeforeDropDB(0),
- WithOpenedCacheSize(1000),
- WithMoveBatchSize(50))
- require.NoError(t, b.Open(mode.ComponentReadWrite))
- require.NoError(t, b.Init())
-
- for addr, storageID := range storageIDs {
- var gPrm common.GetPrm
- gPrm.Address = addr
- gPrm.StorageID = storageID
- _, err := b.Get(context.Background(), gPrm)
- require.NoError(t, err)
- }
-
- metaStub := &storageIDUpdateStub{
- storageIDs: storageIDs,
- guard: &sync.Mutex{},
- }
- limiter := &rebuildLimiterStub{}
- var rPrm common.RebuildPrm
- rPrm.MetaStorage = metaStub
- rPrm.Limiter = limiter
- rPrm.FillPercent = 1
- rRes, err := b.Rebuild(context.Background(), rPrm)
- require.NoError(t, err)
- dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
- require.Equal(t, shouldMigrate, dataMigrated)
-
- for addr, storageID := range storageIDs {
- var gPrm common.GetPrm
- gPrm.Address = addr
- gPrm.StorageID = storageID
- _, err := b.Get(context.Background(), gPrm)
- require.NoError(t, err)
- }
-
- require.NoError(t, b.Close(context.Background()))
- require.NoError(t, limiter.ValidateReleased())
-}
-
-type storageIDUpdateStub struct {
- guard *sync.Mutex
- storageIDs map[oid.Address][]byte
- updatedCount uint64
-}
-
-func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error {
- s.guard.Lock()
- defer s.guard.Unlock()
-
- s.storageIDs[addr] = storageID
- s.updatedCount++
- return nil
-}
-
-type rebuildLimiterStub struct {
- slots atomic.Int64
- readRequests atomic.Int64
- writeRequests atomic.Int64
-}
-
-func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) {
- s.slots.Add(1)
- return func() { s.slots.Add(-1) }, nil
-}
-
-func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) {
- s.readRequests.Add(1)
- return func() { s.readRequests.Add(-1) }, nil
-}
-
-func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) {
- s.writeRequests.Add(1)
- return func() { s.writeRequests.Add(-1) }, nil
-}
-
-func (s *rebuildLimiterStub) ValidateReleased() error {
- if v := s.slots.Load(); v != 0 {
- return fmt.Errorf("invalid slots value %d", v)
- }
- if v := s.readRequests.Load(); v != 0 {
- return fmt.Errorf("invalid read requests value %d", v)
- }
- if v := s.writeRequests.Load(); v != 0 {
- return fmt.Errorf("invalid write requests value %d", v)
- }
- return nil
-}
diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go
index ceaf2538a..d2a2338a3 100644
--- a/pkg/local_object_storage/blobstor/blobstor.go
+++ b/pkg/local_object_storage/blobstor/blobstor.go
@@ -1,7 +1,6 @@
package blobstor
import (
- "context"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -41,21 +40,20 @@ type SubStorageInfo struct {
type Option func(*cfg)
type cfg struct {
- compression compression.Compressor
+ compression compression.Config
log *logger.Logger
storage []SubStorage
metrics Metrics
}
func initConfig(c *cfg) {
- c.log = logger.NewLoggerWrapper(zap.L())
+ c.log = &logger.Logger{Logger: zap.L()}
c.metrics = &noopMetrics{}
}
// New creates, initializes and returns new BlobStor instance.
func New(opts ...Option) *BlobStor {
bs := new(BlobStor)
- bs.mode = mode.Disabled
initConfig(&bs.cfg)
for i := range opts {
@@ -91,19 +89,35 @@ func WithStorages(st []SubStorage) Option {
// WithLogger returns option to specify BlobStor's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "BlobStor"))}
}
}
-func WithCompression(comp compression.Config) Option {
+// WithCompressObjects returns option to toggle
+// compression of the stored objects.
+//
+// If true, Zstandard algorithm is used for data compression.
+//
+// If compressor (decompressor) creation failed,
+// the uncompressed option will be used, and the error
+// is recorded in the provided log.
+func WithCompressObjects(comp bool) Option {
return func(c *cfg) {
- c.compression.Config = comp
+ c.compression.Enabled = comp
+ }
+}
+
+// WithUncompressableContentTypes returns option to disable decompression
+// for specific content types as seen by object.AttributeContentType attribute.
+func WithUncompressableContentTypes(values []string) Option {
+ return func(c *cfg) {
+ c.compression.UncompressableContentTypes = values
}
}
// SetReportErrorFunc allows to provide a function to be called on disk errors.
// This function MUST be called before Open.
-func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) {
+func (b *BlobStor) SetReportErrorFunc(f func(string, error)) {
for i := range b.storage {
b.storage[i].Storage.SetReportErrorFunc(f)
}
@@ -115,6 +129,6 @@ func WithMetrics(m Metrics) Option {
}
}
-func (b *BlobStor) Compressor() *compression.Compressor {
- return &b.compression
+func (b *BlobStor) Compressor() *compression.Config {
+ return &b.cfg.compression
}
diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go
index 6ddeb6f00..f1d567da7 100644
--- a/pkg/local_object_storage/blobstor/blobstor_test.go
+++ b/pkg/local_object_storage/blobstor/blobstor_test.go
@@ -9,17 +9,14 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
)
func defaultTestStorages(p string, smallSizeLimit uint64) ([]SubStorage, *teststore.TestStore, *teststore.TestStore) {
smallFileStorage := teststore.New(teststore.WithSubstorage(blobovniczatree.NewBlobovniczaTree(
- context.Background(),
blobovniczatree.WithRootPath(filepath.Join(p, "blobovniczas")),
blobovniczatree.WithBlobovniczaShallowWidth(1)), // default width is 16, slow init
))
@@ -52,18 +49,16 @@ func TestCompression(t *testing.T) {
newBlobStor := func(t *testing.T, compress bool) *BlobStor {
bs := New(
- WithCompression(compression.Config{
- Enabled: compress,
- }),
+ WithCompressObjects(compress),
WithStorages(defaultStorages(dir, smallSizeLimit)))
- require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init(context.Background()))
+ require.NoError(t, bs.Open(context.Background(), false))
+ require.NoError(t, bs.Init())
return bs
}
bigObj := make([]*objectSDK.Object, objCount)
smallObj := make([]*objectSDK.Object, objCount)
- for i := range objCount {
+ for i := 0; i < objCount; i++ {
bigObj[i] = testObject(smallSizeLimit * 2)
smallObj[i] = testObject(smallSizeLimit / 2)
}
@@ -94,20 +89,20 @@ func TestCompression(t *testing.T) {
blobStor := newBlobStor(t, false)
testPut(t, blobStor, 0)
testGet(t, blobStor, 0)
- require.NoError(t, blobStor.Close(context.Background()))
+ require.NoError(t, blobStor.Close())
blobStor = newBlobStor(t, true)
testGet(t, blobStor, 0) // get uncompressed object with compress enabled
testPut(t, blobStor, 1)
testGet(t, blobStor, 1)
- require.NoError(t, blobStor.Close(context.Background()))
+ require.NoError(t, blobStor.Close())
blobStor = newBlobStor(t, false)
testGet(t, blobStor, 0) // get old uncompressed object
testGet(t, blobStor, 1) // get compressed object with compression disabled
testPut(t, blobStor, 2)
testGet(t, blobStor, 2)
- require.NoError(t, blobStor.Close(context.Background()))
+ require.NoError(t, blobStor.Close())
}
func TestBlobstor_needsCompression(t *testing.T) {
@@ -116,14 +111,11 @@ func TestBlobstor_needsCompression(t *testing.T) {
dir := t.TempDir()
bs := New(
- WithCompression(compression.Config{
- Enabled: compress,
- UncompressableContentTypes: ct,
- }),
+ WithCompressObjects(compress),
+ WithUncompressableContentTypes(ct),
WithStorages([]SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
- context.Background(),
blobovniczatree.WithRootPath(filepath.Join(dir, "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowWidth(1)), // default width is 16, slow init
Policy: func(_ *objectSDK.Object, data []byte) bool {
@@ -134,8 +126,8 @@ func TestBlobstor_needsCompression(t *testing.T) {
Storage: fstree.New(fstree.WithPath(dir)),
},
}))
- require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init(context.Background()))
+ require.NoError(t, bs.Open(context.Background(), false))
+ require.NoError(t, bs.Init())
return bs
}
@@ -196,8 +188,8 @@ func TestConcurrentPut(t *testing.T) {
blobStor := New(
WithStorages(defaultStorages(dir, smallSizeLimit)))
- require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, blobStor.Init(context.Background()))
+ require.NoError(t, blobStor.Open(context.Background(), false))
+ require.NoError(t, blobStor.Init())
testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)})
@@ -224,7 +216,7 @@ func TestConcurrentPut(t *testing.T) {
bigObj := testObject(smallSizeLimit * 2)
var wg sync.WaitGroup
- for range concurrentPutCount {
+ for i := 0; i < concurrentPutCount; i++ {
wg.Add(1)
go func() {
testPut(t, blobStor, bigObj)
@@ -240,7 +232,7 @@ func TestConcurrentPut(t *testing.T) {
bigObj := testObject(smallSizeLimit * 2)
var wg sync.WaitGroup
- for range concurrentPutCount + 1 {
+ for i := 0; i < concurrentPutCount+1; i++ {
wg.Add(1)
go func() {
testPutFileExistsError(t, blobStor, bigObj)
@@ -256,7 +248,7 @@ func TestConcurrentPut(t *testing.T) {
smallObj := testObject(smallSizeLimit / 2)
var wg sync.WaitGroup
- for range concurrentPutCount {
+ for i := 0; i < concurrentPutCount; i++ {
wg.Add(1)
go func() {
testPut(t, blobStor, smallObj)
@@ -276,8 +268,8 @@ func TestConcurrentDelete(t *testing.T) {
blobStor := New(
WithStorages(defaultStorages(dir, smallSizeLimit)))
- require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, blobStor.Init(context.Background()))
+ require.NoError(t, blobStor.Open(context.Background(), false))
+ require.NoError(t, blobStor.Init())
testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
var prm common.PutPrm
@@ -307,7 +299,7 @@ func TestConcurrentDelete(t *testing.T) {
testPut(t, blobStor, bigObj)
var wg sync.WaitGroup
- for range 2 {
+ for i := 0; i < 2; i++ {
wg.Add(1)
go func() {
testDelete(t, blobStor, bigObj)
@@ -324,7 +316,7 @@ func TestConcurrentDelete(t *testing.T) {
testPut(t, blobStor, smallObj)
var wg sync.WaitGroup
- for range 2 {
+ for i := 0; i < 2; i++ {
wg.Add(1)
go func() {
testDelete(t, blobStor, smallObj)
diff --git a/pkg/local_object_storage/blobstor/common/delete.go b/pkg/local_object_storage/blobstor/common/delete.go
index c19e099cb..1b04eab1a 100644
--- a/pkg/local_object_storage/blobstor/common/delete.go
+++ b/pkg/local_object_storage/blobstor/common/delete.go
@@ -8,7 +8,6 @@ import (
type DeletePrm struct {
Address oid.Address
StorageID []byte
- Size uint64
}
// DeleteRes groups the resulting values of Delete operation.
diff --git a/pkg/local_object_storage/blobstor/common/iterate.go b/pkg/local_object_storage/blobstor/common/iterate.go
index a1b8ff047..a6f0da26b 100644
--- a/pkg/local_object_storage/blobstor/common/iterate.go
+++ b/pkg/local_object_storage/blobstor/common/iterate.go
@@ -15,7 +15,9 @@ type IterationHandler func(IterationElement) error
// IteratePrm groups the parameters of Iterate operation.
type IteratePrm struct {
Handler IterationHandler
+ LazyHandler func(oid.Address, func() ([]byte, error)) error
IgnoreErrors bool
+ ErrorHandler func(oid.Address, error) error
}
// IterateRes groups the resulting values of Iterate operation.
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go
deleted file mode 100644
index 788fe66f2..000000000
--- a/pkg/local_object_storage/blobstor/common/rebuild.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package common
-
-import (
- "context"
-
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type RebuildRes struct {
- ObjectsMoved uint64
- FilesRemoved uint64
-}
-
-type RebuildPrm struct {
- MetaStorage MetaStorage
- Limiter RebuildLimiter
- FillPercent int
-}
-
-type MetaStorage interface {
- UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
-}
-
-type ReleaseFunc func()
-
-type ConcurrencyLimiter interface {
- AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error)
-}
-
-type RateLimiter interface {
- ReadRequest(context.Context) (ReleaseFunc, error)
- WriteRequest(context.Context) (ReleaseFunc, error)
-}
-
-type RebuildLimiter interface {
- ConcurrencyLimiter
- RateLimiter
-}
diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go
index e35c35e60..852f020cc 100644
--- a/pkg/local_object_storage/blobstor/common/storage.go
+++ b/pkg/local_object_storage/blobstor/common/storage.go
@@ -4,26 +4,24 @@ import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
// Storage represents key-value object storage.
// It is used as a building block for a blobstor of a shard.
type Storage interface {
- Open(mode mode.ComponentMode) error
+ Open(readOnly bool) error
Init() error
- Close(context.Context) error
+ Close() error
Type() string
Path() string
- ObjectsCount(ctx context.Context) (uint64, error)
- SetCompressor(cc *compression.Compressor)
- Compressor() *compression.Compressor
+ SetCompressor(cc *compression.Config)
+ Compressor() *compression.Config
// SetReportErrorFunc allows to provide a function to be called on disk errors.
// This function MUST be called before Open.
- SetReportErrorFunc(f func(context.Context, string, error))
+ SetReportErrorFunc(f func(string, error))
SetParentID(parentID string)
Get(context.Context, GetPrm) (GetRes, error)
@@ -32,5 +30,4 @@ type Storage interface {
Put(context.Context, PutPrm) (PutRes, error)
Delete(context.Context, DeletePrm) (DeleteRes, error)
Iterate(context.Context, IteratePrm) (IterateRes, error)
- Rebuild(context.Context, RebuildPrm) (RebuildRes, error)
}
diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go
index 445a0494b..6e05366cf 100644
--- a/pkg/local_object_storage/blobstor/compression/bench_test.go
+++ b/pkg/local_object_storage/blobstor/compression/bench_test.go
@@ -3,15 +3,13 @@ package compression
import (
"crypto/rand"
"fmt"
- "log"
"testing"
- "github.com/klauspost/compress"
"github.com/stretchr/testify/require"
)
func BenchmarkCompression(b *testing.B) {
- c := Compressor{Config: Config{Enabled: true}}
+ c := Config{Enabled: true}
require.NoError(b, c.Init())
for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} {
@@ -33,10 +31,10 @@ func BenchmarkCompression(b *testing.B) {
}
}
-func benchWith(b *testing.B, c Compressor, data []byte) {
+func benchWith(b *testing.B, c Config, data []byte) {
b.ResetTimer()
b.ReportAllocs()
- for range b.N {
+ for i := 0; i < b.N; i++ {
_ = c.Compress(data)
}
}
@@ -49,54 +47,3 @@ func notSoRandomSlice(size, blockSize int) []byte {
}
return data
}
-
-func BenchmarkCompressionRealVSEstimate(b *testing.B) {
- var total float64 // to prevent from compiler optimizations
- maxSize := 60 * 1024 * 1024
- b.Run("estimate", func(b *testing.B) {
- b.ResetTimer()
-
- c := &Compressor{
- Config: Config{
- Enabled: true,
- },
- }
- require.NoError(b, c.Init())
-
- for size := 1024; size <= maxSize; size *= 2 {
- data := make([]byte, size)
- _, err := rand.Reader.Read(data)
- require.NoError(b, err)
-
- b.StartTimer()
- estimation := compress.Estimate(data)
- total += estimation
- b.StopTimer()
- }
- })
-
- b.Run("compress", func(b *testing.B) {
- b.ResetTimer()
-
- c := &Compressor{
- Config: Config{
- Enabled: true,
- },
- }
- require.NoError(b, c.Init())
-
- for size := 1024; size <= maxSize; size *= 2 {
- data := make([]byte, size)
- _, err := rand.Reader.Read(data)
- require.NoError(b, err)
-
- b.StartTimer()
- maxSize := c.encoder.MaxEncodedSize(len(data))
- compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize))
- total += float64(len(compressed)) / float64(len(data))
- b.StopTimer()
- }
- })
-
- log.Println(total)
-}
diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go
index c76cec9a1..270c2b18d 100644
--- a/pkg/local_object_storage/blobstor/compression/compress.go
+++ b/pkg/local_object_storage/blobstor/compression/compress.go
@@ -4,36 +4,17 @@ import (
"bytes"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "github.com/klauspost/compress"
"github.com/klauspost/compress/zstd"
)
-type Level string
-
-const (
- LevelDefault Level = ""
- LevelOptimal Level = "optimal"
- LevelFastest Level = "fastest"
- LevelSmallestSize Level = "smallest_size"
-)
-
-type Compressor struct {
- Config
-
- encoder *zstd.Encoder
- decoder *zstd.Decoder
-}
-
// Config represents common compression-related configuration.
type Config struct {
Enabled bool
UncompressableContentTypes []string
- Level Level
- EstimateCompressibility bool
- EstimateCompressibilityThreshold float64
+ encoder *zstd.Encoder
+ decoder *zstd.Decoder
}
// zstdFrameMagic contains first 4 bytes of any compressed object
@@ -41,11 +22,11 @@ type Config struct {
var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
// Init initializes compression routines.
-func (c *Compressor) Init() error {
+func (c *Config) Init() error {
var err error
if c.Enabled {
- c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel()))
+ c.encoder, err = zstd.NewWriter(nil)
if err != nil {
return err
}
@@ -88,7 +69,7 @@ func (c *Config) NeedsCompression(obj *objectSDK.Object) bool {
// Decompress decompresses data if it starts with the magic
// and returns data untouched otherwise.
-func (c *Compressor) Decompress(data []byte) ([]byte, error) {
+func (c *Config) Decompress(data []byte) ([]byte, error) {
if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) {
return data, nil
}
@@ -97,21 +78,10 @@ func (c *Compressor) Decompress(data []byte) ([]byte, error) {
// Compress compresses data if compression is enabled
// and returns data untouched otherwise.
-func (c *Compressor) Compress(data []byte) []byte {
+func (c *Config) Compress(data []byte) []byte {
if c == nil || !c.Enabled {
return data
}
- if c.EstimateCompressibility {
- estimated := compress.Estimate(data)
- if estimated >= c.EstimateCompressibilityThreshold {
- return c.compress(data)
- }
- return data
- }
- return c.compress(data)
-}
-
-func (c *Compressor) compress(data []byte) []byte {
maxSize := c.encoder.MaxEncodedSize(len(data))
compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize))
if len(data) < len(compressed) {
@@ -121,7 +91,7 @@ func (c *Compressor) compress(data []byte) []byte {
}
// Close closes encoder and decoder, returns any error occurred.
-func (c *Compressor) Close() error {
+func (c *Config) Close() error {
var err error
if c.encoder != nil {
err = c.encoder.Close()
@@ -131,24 +101,3 @@ func (c *Compressor) Close() error {
}
return err
}
-
-func (c *Config) HasValidCompressionLevel() bool {
- return c.Level == LevelDefault ||
- c.Level == LevelOptimal ||
- c.Level == LevelFastest ||
- c.Level == LevelSmallestSize
-}
-
-func (c *Compressor) compressionLevel() zstd.EncoderLevel {
- switch c.Level {
- case LevelDefault, LevelOptimal:
- return zstd.SpeedDefault
- case LevelFastest:
- return zstd.SpeedFastest
- case LevelSmallestSize:
- return zstd.SpeedBestCompression
- default:
- assert.Fail("unknown compression level", string(c.Level))
- return zstd.SpeedDefault
- }
-}
diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go
index 0418eedd0..4b8a36de8 100644
--- a/pkg/local_object_storage/blobstor/control.go
+++ b/pkg/local_object_storage/blobstor/control.go
@@ -6,40 +6,25 @@ import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"go.uber.org/zap"
)
// Open opens BlobStor.
-func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error {
- b.log.Debug(ctx, logs.BlobstorOpening)
+func (b *BlobStor) Open(ctx context.Context, readOnly bool) error {
+ b.log.Debug(logs.BlobstorOpening)
- b.modeMtx.Lock()
- defer b.modeMtx.Unlock()
- b.mode = mode
-
- err := b.openBlobStor(ctx, mode)
- if err != nil {
- return err
- }
- b.metrics.SetMode(mode.ReadOnly())
-
- return nil
-}
-
-func (b *BlobStor) openBlobStor(ctx context.Context, mod mode.Mode) error {
for i := range b.storage {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
- err := b.storage[i].Storage.Open(mode.ConvertToComponentMode(mod))
+ err := b.storage[i].Storage.Open(readOnly)
if err != nil {
return err
}
}
+ b.metrics.SetMode(readOnly)
return nil
}
@@ -51,13 +36,9 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
// If BlobStor is already initialized, no action is taken.
//
// Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initializaiton failure.
-func (b *BlobStor) Init(ctx context.Context) error {
- b.log.Debug(ctx, logs.BlobstorInitializing)
+func (b *BlobStor) Init() error {
+ b.log.Debug(logs.BlobstorInitializing)
- if !b.compression.HasValidCompressionLevel() {
- b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level)))
- b.compression.Level = compression.LevelDefault
- }
if err := b.compression.Init(); err != nil {
return err
}
@@ -72,14 +53,14 @@ func (b *BlobStor) Init(ctx context.Context) error {
}
// Close releases all internal resources of BlobStor.
-func (b *BlobStor) Close(ctx context.Context) error {
- b.log.Debug(ctx, logs.BlobstorClosing)
+func (b *BlobStor) Close() error {
+ b.log.Debug(logs.BlobstorClosing)
var firstErr error
for i := range b.storage {
- err := b.storage[i].Storage.Close(ctx)
+ err := b.storage[i].Storage.Close()
if err != nil {
- b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err))
+ b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
if firstErr == nil {
firstErr = err
}
diff --git a/pkg/local_object_storage/blobstor/delete.go b/pkg/local_object_storage/blobstor/delete.go
index 86d8f15e3..8930980a6 100644
--- a/pkg/local_object_storage/blobstor/delete.go
+++ b/pkg/local_object_storage/blobstor/delete.go
@@ -6,10 +6,8 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -39,12 +37,11 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
if err == nil || !client.IsErrObjectNotFound(err) {
if err == nil {
success = true
- logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
+ logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
}
return res, err
}
}
- return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
var st common.Storage
@@ -58,7 +55,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
res, err := st.Delete(ctx, prm)
if err == nil {
success = true
- logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
+ logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
}
return res, err
diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go
index c155e15b8..43feec7c9 100644
--- a/pkg/local_object_storage/blobstor/exists.go
+++ b/pkg/local_object_storage/blobstor/exists.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -72,9 +73,10 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
}
for _, err := range errors[:len(errors)-1] {
- b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
+ b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
zap.Stringer("address", prm.Address),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return common.ExistsRes{}, errors[len(errors)-1]
diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go
index 7eb7d49bf..367b63af1 100644
--- a/pkg/local_object_storage/blobstor/exists_test.go
+++ b/pkg/local_object_storage/blobstor/exists_test.go
@@ -7,7 +7,6 @@ import (
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -21,8 +20,8 @@ func TestExists(t *testing.T) {
b := New(WithStorages(storages))
- require.NoError(t, b.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, b.Init(context.Background()))
+ require.NoError(t, b.Open(context.Background(), false))
+ require.NoError(t, b.Init())
objects := []*objectSDK.Object{
testObject(smallSizeLimit / 2),
diff --git a/pkg/local_object_storage/blobstor/fstree/control.go b/pkg/local_object_storage/blobstor/fstree/control.go
index 2544729f7..c56312d38 100644
--- a/pkg/local_object_storage/blobstor/fstree/control.go
+++ b/pkg/local_object_storage/blobstor/fstree/control.go
@@ -1,16 +1,13 @@
package fstree
import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
)
// Open implements common.Storage.
-func (t *FSTree) Open(mode mode.ComponentMode) error {
- t.readOnly = mode.ReadOnly()
- t.metrics.SetMode(mode)
+func (t *FSTree) Open(ro bool) error {
+ t.readOnly = ro
+ t.metrics.SetMode(ro)
return nil
}
@@ -19,18 +16,11 @@ func (t *FSTree) Init() error {
if err := util.MkdirAllX(t.RootPath, t.Permissions); err != nil {
return err
}
- if !t.readOnly {
- f := newSpecificWriteData(t.fileCounter, t.RootPath, t.Permissions, t.noSync)
- if f != nil {
- t.writer = f
- }
- }
-
return t.initFileCounter()
}
// Close implements common.Storage.
-func (t *FSTree) Close(_ context.Context) error {
+func (t *FSTree) Close() error {
t.metrics.Close()
return nil
}
diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go
index 3caee7ee1..70b346093 100644
--- a/pkg/local_object_storage/blobstor/fstree/counter.go
+++ b/pkg/local_object_storage/blobstor/fstree/counter.go
@@ -1,69 +1,32 @@
package fstree
import (
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "math"
+ "sync/atomic"
)
// FileCounter used to count files in FSTree. The implementation must be thread-safe.
type FileCounter interface {
- Set(count, size uint64)
- Inc(size uint64)
- Dec(size uint64)
+ Set(v uint64)
+ Inc()
+ Dec()
}
type noopCounter struct{}
-func (c *noopCounter) Set(uint64, uint64) {}
-func (c *noopCounter) Inc(uint64) {}
-func (c *noopCounter) Dec(uint64) {}
-
-func counterEnabled(c FileCounter) bool {
- _, noop := c.(*noopCounter)
- return !noop
-}
+func (c *noopCounter) Set(uint64) {}
+func (c *noopCounter) Inc() {}
+func (c *noopCounter) Dec() {}
type SimpleCounter struct {
- mtx sync.RWMutex
- count uint64
- size uint64
+ v atomic.Uint64
}
func NewSimpleCounter() *SimpleCounter {
return &SimpleCounter{}
}
-func (c *SimpleCounter) Set(count, size uint64) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- c.count = count
- c.size = size
-}
-
-func (c *SimpleCounter) Inc(size uint64) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- c.count++
- c.size += size
-}
-
-func (c *SimpleCounter) Dec(size uint64) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- assert.True(c.count > 0, "fstree.SimpleCounter: invalid count")
- c.count--
-
- assert.True(c.size >= size, "fstree.SimpleCounter: invalid size")
- c.size -= size
-}
-
-func (c *SimpleCounter) CountSize() (uint64, uint64) {
- c.mtx.RLock()
- defer c.mtx.RUnlock()
-
- return c.count, c.size
-}
+func (c *SimpleCounter) Set(v uint64) { c.v.Store(v) }
+func (c *SimpleCounter) Inc() { c.v.Add(1) }
+func (c *SimpleCounter) Dec() { c.v.Add(math.MaxUint64) }
+func (c *SimpleCounter) Value() uint64 { return c.v.Load() }
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 112741ab4..243a7239e 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -10,15 +10,14 @@ import (
"path/filepath"
"strconv"
"strings"
+ "sync/atomic"
"syscall"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -26,7 +25,6 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
)
type keyLock interface {
@@ -43,9 +41,7 @@ func (l *noopKeyLock) Unlock(string) {}
type FSTree struct {
Info
- log *logger.Logger
-
- compressor *compression.Compressor
+ *compression.Config
Depth uint64
DirNameLen int
@@ -53,9 +49,11 @@ type FSTree struct {
readOnly bool
metrics Metrics
- fileCounter FileCounter
+ fileGuard keyLock
+ fileCounter FileCounter
+ fileCounterEnabled bool
- writer writer
+ suffix atomic.Uint64
}
// Info groups the information about file storage.
@@ -79,20 +77,19 @@ var _ common.Storage = (*FSTree)(nil)
func New(opts ...Option) *FSTree {
f := &FSTree{
Info: Info{
- Permissions: 0o700,
+ Permissions: 0700,
RootPath: "./",
},
- compressor: nil,
+ Config: nil,
Depth: 4,
DirNameLen: DirNameLen,
metrics: &noopMetrics{},
+ fileGuard: &noopKeyLock{},
fileCounter: &noopCounter{},
- log: logger.NewLoggerWrapper(zap.L()),
}
for i := range opts {
opts[i](f)
}
- f.writer = newGenericWriteData(f.fileCounter, f.Permissions, f.noSync)
return f
}
@@ -148,13 +145,9 @@ func (t *FSTree) Iterate(ctx context.Context, prm common.IteratePrm) (common.Ite
func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, prm common.IteratePrm) error {
curName := strings.Join(curPath[1:], "")
- dirPath := filepath.Join(curPath...)
- des, err := os.ReadDir(dirPath)
+ des, err := os.ReadDir(filepath.Join(curPath...))
if err != nil {
if prm.IgnoreErrors {
- t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
- zap.Error(err),
- zap.String("directory_path", dirPath))
return nil
}
return err
@@ -189,106 +182,37 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
if err != nil {
continue
}
- path := filepath.Join(curPath...)
- data, err := os.ReadFile(path)
+
+ data, err := os.ReadFile(filepath.Join(curPath...))
if err != nil && os.IsNotExist(err) {
continue
}
- if err == nil {
- data, err = t.compressor.Decompress(data)
- }
- if err != nil {
- if prm.IgnoreErrors {
- t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
- zap.Stringer("address", addr),
- zap.Error(err),
- zap.String("path", path))
- continue
+ if prm.LazyHandler != nil {
+ err = prm.LazyHandler(addr, func() ([]byte, error) {
+ return data, err
+ })
+ } else {
+ if err == nil {
+ data, err = t.Decompress(data)
}
- return err
- }
-
- err = prm.Handler(common.IterationElement{
- Address: addr,
- ObjectData: data,
- StorageID: []byte{},
- })
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type ObjectInfo struct {
- Address oid.Address
- DataSize uint64
-}
-type IterateInfoHandler func(ObjectInfo) error
-
-func (t *FSTree) IterateInfo(ctx context.Context, handler IterateInfoHandler) error {
- var (
- err error
- startedAt = time.Now()
- )
- defer func() {
- t.metrics.IterateInfo(time.Since(startedAt), err == nil)
- }()
- _, span := tracing.StartSpanFromContext(ctx, "FSTree.IterateInfo")
- defer span.End()
-
- return t.iterateInfo(ctx, 0, []string{t.RootPath}, handler)
-}
-
-func (t *FSTree) iterateInfo(ctx context.Context, depth uint64, curPath []string, handler IterateInfoHandler) error {
- curName := strings.Join(curPath[1:], "")
- dirPath := filepath.Join(curPath...)
- entries, err := os.ReadDir(dirPath)
- if err != nil {
- return fmt.Errorf("read fstree dir '%s': %w", dirPath, err)
- }
-
- isLast := depth >= t.Depth
- l := len(curPath)
- curPath = append(curPath, "")
-
- for i := range entries {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- curPath[l] = entries[i].Name()
-
- if !isLast && entries[i].IsDir() {
- err := t.iterateInfo(ctx, depth+1, curPath, handler)
if err != nil {
+ if prm.IgnoreErrors {
+ if prm.ErrorHandler != nil {
+ return prm.ErrorHandler(addr, err)
+ }
+ continue
+ }
return err
}
+
+ err = prm.Handler(common.IterationElement{
+ Address: addr,
+ ObjectData: data,
+ StorageID: []byte{},
+ })
}
- if depth != t.Depth {
- continue
- }
-
- addr, err := addressFromString(curName + entries[i].Name())
- if err != nil {
- continue
- }
- info, err := entries[i].Info()
- if err != nil {
- if os.IsNotExist(err) {
- continue
- }
- return err
- }
-
- err = handler(ObjectInfo{
- Address: addr,
- DataSize: uint64(info.Size()),
- })
if err != nil {
return err
}
@@ -338,7 +262,21 @@ func (t *FSTree) Delete(ctx context.Context, prm common.DeletePrm) (common.Delet
}
p := t.treePath(prm.Address)
- err = t.writer.removeFile(p, prm.Size)
+
+ if t.fileCounterEnabled {
+ t.fileGuard.Lock(p)
+ err = os.Remove(p)
+ t.fileGuard.Unlock(p)
+ if err == nil {
+ t.fileCounter.Dec()
+ }
+ } else {
+ err = os.Remove(p)
+ }
+
+ if err != nil && os.IsNotExist(err) {
+ err = logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
return common.DeleteRes{}, err
}
@@ -405,11 +343,71 @@ func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, err
return common.PutRes{}, err
}
if !prm.DontCompress {
- prm.RawData = t.compressor.Compress(prm.RawData)
+ prm.RawData = t.Compress(prm.RawData)
}
size = len(prm.RawData)
- return common.PutRes{StorageID: []byte{}}, t.writer.writeData(p, prm.RawData)
+ tmpPath := p + "#" + strconv.FormatUint(t.suffix.Add(1), 10)
+ err = t.writeAndRename(tmpPath, p, prm.RawData)
+ return common.PutRes{StorageID: []byte{}}, err
+}
+
+// writeAndRename opens tmpPath exclusively, writes data to it and renames it to p.
+func (t *FSTree) writeAndRename(tmpPath, p string, data []byte) error {
+ if t.fileCounterEnabled {
+ t.fileGuard.Lock(p)
+ defer t.fileGuard.Unlock(p)
+ }
+
+ err := t.writeFile(tmpPath, data)
+ if err != nil {
+ var pe *fs.PathError
+ if errors.As(err, &pe) {
+ switch pe.Err {
+ case syscall.ENOSPC:
+ err = common.ErrNoSpace
+ _ = os.RemoveAll(tmpPath)
+ }
+ }
+ return err
+ }
+
+ if t.fileCounterEnabled {
+ t.fileCounter.Inc()
+ var targetFileExists bool
+ if _, e := os.Stat(p); e == nil {
+ targetFileExists = true
+ }
+ err = os.Rename(tmpPath, p)
+ if err == nil && targetFileExists {
+ t.fileCounter.Dec()
+ }
+ } else {
+ err = os.Rename(tmpPath, p)
+ }
+ return err
+}
+
+func (t *FSTree) writeFlags() int {
+ flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC | os.O_EXCL
+ if t.noSync {
+ return flags
+ }
+ return flags | os.O_SYNC
+}
+
+// writeFile writes data to a file with path p.
+// The code is copied from `os.WriteFile` with minor corrections for flags.
+func (t *FSTree) writeFile(p string, data []byte) error {
+ f, err := os.OpenFile(p, t.writeFlags(), t.Permissions)
+ if err != nil {
+ return err
+ }
+ _, err = f.Write(data)
+ if err1 := f.Close(); err1 != nil && err == nil {
+ err = err1
+ }
+ return err
}
// Get returns an object from the storage by address.
@@ -433,6 +431,10 @@ func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, err
p := t.treePath(prm.Address)
+ if _, err := os.Stat(p); os.IsNotExist(err) {
+ return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+
var data []byte
var err error
{
@@ -448,7 +450,7 @@ func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, err
}
}
- data, err = t.compressor.Decompress(data)
+ data, err = t.Decompress(data)
if err != nil {
return common.GetRes{}, err
}
@@ -506,81 +508,36 @@ func (t *FSTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common.G
// initFileCounter walks the file tree rooted at FSTree's root,
// counts total items count, inits counter and returns number of stored objects.
func (t *FSTree) initFileCounter() error {
- if !counterEnabled(t.fileCounter) {
+ if !t.fileCounterEnabled {
return nil
}
- count, size, err := t.countFiles()
+ counter, err := t.countFiles()
if err != nil {
return err
}
- t.fileCounter.Set(count, size)
+ t.fileCounter.Set(counter)
return nil
}
-func (t *FSTree) countFiles() (uint64, uint64, error) {
- var count, size uint64
+func (t *FSTree) countFiles() (uint64, error) {
+ var counter uint64
// it is simpler to just consider every file
// that is not directory as an object
err := filepath.WalkDir(t.RootPath,
func(_ string, d fs.DirEntry, _ error) error {
- if d.IsDir() {
- return nil
- }
- count++
- info, err := d.Info()
- if err != nil {
- return err
- }
- size += uint64(info.Size())
-
- return nil
- },
- )
- if err != nil {
- return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
- }
-
- return count, size, nil
-}
-
-func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- t.metrics.ObjectsCount(time.Since(startedAt), success)
- }()
-
- ctx, span := tracing.StartSpanFromContext(ctx, "FSTree.ObjectsCount",
- trace.WithAttributes(
- attribute.String("path", t.RootPath),
- ))
- defer span.End()
-
- var result uint64
-
- err := filepath.WalkDir(t.RootPath,
- func(_ string, d fs.DirEntry, _ error) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
if !d.IsDir() {
- result++
+ counter++
}
return nil
},
)
if err != nil {
- return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
+ return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
}
- success = true
- return result, nil
+
+ return counter, nil
}
// Type is fstree storage type used in logs and configuration.
@@ -597,23 +554,19 @@ func (t *FSTree) Path() string {
}
// SetCompressor implements common.Storage.
-func (t *FSTree) SetCompressor(cc *compression.Compressor) {
- t.compressor = cc
+func (t *FSTree) SetCompressor(cc *compression.Config) {
+ t.Config = cc
}
-func (t *FSTree) Compressor() *compression.Compressor {
- return t.compressor
+func (t *FSTree) Compressor() *compression.Config {
+ return t.Config
}
// SetReportErrorFunc implements common.Storage.
-func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) {
+func (t *FSTree) SetReportErrorFunc(_ func(string, error)) {
// Do nothing, FSTree can encounter only one error which is returned.
}
func (t *FSTree) SetParentID(parentID string) {
t.metrics.SetParentID(parentID)
}
-
-func (t *FSTree) Rebuild(_ context.Context, _ common.RebuildPrm) (common.RebuildRes, error) {
- return common.RebuildRes{}, nil
-}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index 50dae46a7..b81ce43f1 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -6,7 +6,6 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -28,7 +27,7 @@ func Benchmark_addressFromString(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
- for range b.N {
+ for i := 0; i < b.N; i++ {
_, err := addressFromString(s)
if err != nil {
b.Fatalf("benchmark error: %v", err)
@@ -44,15 +43,14 @@ func TestObjectCounter(t *testing.T) {
WithDepth(2),
WithDirNameLen(2),
WithFileCounter(counter))
- require.NoError(t, fst.Open(mode.ComponentReadWrite))
+ require.NoError(t, fst.Open(false))
require.NoError(t, fst.Init())
- count, size := counter.CountSize()
- require.Equal(t, uint64(0), count)
- require.Equal(t, uint64(0), size)
+ counterValue := counter.Value()
+ require.Equal(t, uint64(0), counterValue)
defer func() {
- require.NoError(t, fst.Close(context.Background()))
+ require.NoError(t, fst.Close())
}()
addr := oidtest.Address()
@@ -65,73 +63,39 @@ func TestObjectCounter(t *testing.T) {
putPrm.Address = addr
putPrm.RawData, _ = obj.Marshal()
+ var getPrm common.GetPrm
+ getPrm.Address = putPrm.Address
+
var delPrm common.DeletePrm
delPrm.Address = addr
- t.Run("without size hint", func(t *testing.T) {
- eg, egCtx := errgroup.WithContext(context.Background())
+ eg, egCtx := errgroup.WithContext(context.Background())
- eg.Go(func() error {
- for range 1_000 {
- _, err := fst.Put(egCtx, putPrm)
- if err != nil {
- return err
- }
+ eg.Go(func() error {
+ for j := 0; j < 1_000; j++ {
+ _, err := fst.Put(egCtx, putPrm)
+ if err != nil {
+ return err
}
- return nil
- })
-
- eg.Go(func() error {
- var le logicerr.Logical
- for range 1_000 {
- _, err := fst.Delete(egCtx, delPrm)
- if err != nil && !errors.As(err, &le) {
- return err
- }
- }
- return nil
- })
-
- require.NoError(t, eg.Wait())
-
- count, size = counter.CountSize()
- realCount, realSize, err := fst.countFiles()
- require.NoError(t, err)
- require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
- require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
+ }
+ return nil
})
- t.Run("with size hint", func(t *testing.T) {
- delPrm.Size = uint64(len(putPrm.RawData))
- eg, egCtx := errgroup.WithContext(context.Background())
-
- eg.Go(func() error {
- for range 1_000 {
- _, err := fst.Put(egCtx, putPrm)
- if err != nil {
- return err
- }
+ eg.Go(func() error {
+ var le logicerr.Logical
+ for j := 0; j < 1_000; j++ {
+ _, err := fst.Delete(egCtx, delPrm)
+ if err != nil && !errors.As(err, &le) {
+ return err
}
- return nil
- })
-
- eg.Go(func() error {
- var le logicerr.Logical
- for range 1_000 {
- _, err := fst.Delete(egCtx, delPrm)
- if err != nil && !errors.As(err, &le) {
- return err
- }
- }
- return nil
- })
-
- require.NoError(t, eg.Wait())
-
- count, size = counter.CountSize()
- realCount, realSize, err := fst.countFiles()
- require.NoError(t, err)
- require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
- require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
+ }
+ return nil
})
+
+ require.NoError(t, eg.Wait())
+
+ counterValue = counter.Value()
+ realCount, err := fst.countFiles()
+ require.NoError(t, err)
+ require.Equal(t, realCount, counterValue)
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
deleted file mode 100644
index 6d633dad6..000000000
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package fstree
-
-import (
- "errors"
- "io/fs"
- "os"
- "strconv"
- "sync/atomic"
- "syscall"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-type writer interface {
- writeData(string, []byte) error
- removeFile(string, uint64) error
-}
-
-type genericWriter struct {
- perm fs.FileMode
- flags int
-
- fileGuard keyLock
- fileCounter FileCounter
- fileCounterEnabled bool
- suffix atomic.Uint64
-}
-
-func newGenericWriteData(c FileCounter, perm fs.FileMode, noSync bool) writer {
- flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC | os.O_EXCL
- if !noSync {
- flags |= os.O_SYNC
- }
-
- var fileGuard keyLock = &noopKeyLock{}
- fileCounterEnabled := counterEnabled(c)
- if fileCounterEnabled {
- fileGuard = utilSync.NewKeyLocker[string]()
- }
-
- w := &genericWriter{
- perm: perm,
- flags: flags,
-
- fileCounterEnabled: fileCounterEnabled,
- fileGuard: fileGuard,
- fileCounter: c,
- }
- return w
-}
-
-func (w *genericWriter) writeData(p string, data []byte) error {
- tmpPath := p + "#" + strconv.FormatUint(w.suffix.Add(1), 10)
- return w.writeAndRename(tmpPath, p, data)
-}
-
-// writeAndRename opens tmpPath exclusively, writes data to it and renames it to p.
-func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error {
- if w.fileCounterEnabled {
- w.fileGuard.Lock(p)
- defer w.fileGuard.Unlock(p)
- }
-
- err := w.writeFile(tmpPath, data)
- if err != nil {
- var pe *fs.PathError
- if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) {
- err = common.ErrNoSpace
- _ = os.RemoveAll(tmpPath)
- }
- return err
- }
-
- if w.fileCounterEnabled {
- w.fileCounter.Inc(uint64(len(data)))
- var targetFileExists bool
- if _, e := os.Stat(p); e == nil {
- targetFileExists = true
- }
- err = os.Rename(tmpPath, p)
- if err == nil && targetFileExists {
- w.fileCounter.Dec(uint64(len(data)))
- }
- } else {
- err = os.Rename(tmpPath, p)
- }
- return err
-}
-
-// writeFile writes data to a file with path p.
-// The code is copied from `os.WriteFile` with minor corrections for flags.
-func (w *genericWriter) writeFile(p string, data []byte) error {
- f, err := os.OpenFile(p, w.flags, w.perm)
- if err != nil {
- return err
- }
- _, err = f.Write(data)
- if err1 := f.Close(); err1 != nil && err == nil {
- err = err1
- }
- return err
-}
-
-func (w *genericWriter) removeFile(p string, size uint64) error {
- var err error
- if w.fileCounterEnabled {
- err = w.removeWithCounter(p, size)
- } else {
- err = os.Remove(p)
- }
-
- if err != nil && os.IsNotExist(err) {
- err = logicerr.Wrap(new(apistatus.ObjectNotFound))
- }
- return err
-}
-
-func (w *genericWriter) removeWithCounter(p string, size uint64) error {
- w.fileGuard.Lock(p)
- defer w.fileGuard.Unlock(p)
-
- if size == 0 {
- stat, err := os.Stat(p)
- if err != nil {
- return err
- }
- size = uint64(stat.Size())
- }
-
- if err := os.Remove(p); err != nil {
- return err
- }
- w.fileCounter.Dec(size)
- return nil
-}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
deleted file mode 100644
index 49cbda344..000000000
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
+++ /dev/null
@@ -1,137 +0,0 @@
-//go:build linux && !fstree_generic
-
-package fstree
-
-import (
- "errors"
- "io/fs"
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "golang.org/x/sys/unix"
-)
-
-type linuxWriter struct {
- root string
- perm uint32
- flags int
-
- fileGuard keyLock
- fileCounter FileCounter
- fileCounterEnabled bool
-}
-
-func newSpecificWriteData(c FileCounter, root string, perm fs.FileMode, noSync bool) writer {
- flags := unix.O_WRONLY | unix.O_TMPFILE | unix.O_CLOEXEC
- if !noSync {
- flags |= unix.O_DSYNC
- }
- fd, err := unix.Open(root, flags, uint32(perm))
- if err != nil {
- // Which means that OS-specific writeData can't be created
- // and FSTree should use the generic one.
- return nil
- }
- _ = unix.Close(fd) // Don't care about error.
- var fileGuard keyLock = &noopKeyLock{}
- fileCounterEnabled := counterEnabled(c)
- if fileCounterEnabled {
- fileGuard = utilSync.NewKeyLocker[string]()
- }
- w := &linuxWriter{
- root: root,
- perm: uint32(perm),
- flags: flags,
- fileGuard: fileGuard,
- fileCounter: c,
- fileCounterEnabled: fileCounterEnabled,
- }
- return w
-}
-
-func (w *linuxWriter) writeData(p string, data []byte) error {
- err := w.writeFile(p, data)
- if errors.Is(err, unix.ENOSPC) {
- return common.ErrNoSpace
- }
- return err
-}
-
-func (w *linuxWriter) writeFile(p string, data []byte) error {
- if w.fileCounterEnabled {
- w.fileGuard.Lock(p)
- defer w.fileGuard.Unlock(p)
- }
- fd, err := unix.Open(w.root, w.flags, w.perm)
- if err != nil {
- return err
- }
- written := 0
- tmpPath := "/proc/self/fd/" + strconv.FormatUint(uint64(fd), 10)
- n, err := unix.Write(fd, data)
- for err == nil {
- written += n
-
- if written == len(data) {
- err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW)
- if err == nil {
- w.fileCounter.Inc(uint64(len(data)))
- }
- if errors.Is(err, unix.EEXIST) {
- err = nil
- }
- break
- }
-
- // From man 2 write:
- // https://www.man7.org/linux/man-pages/man2/write.2.html
- //
- // Note that a successful write() may transfer fewer than count
- // bytes. Such partial writes can occur for various reasons; for
- // example, because there was insufficient space on the disk device
- // to write all of the requested bytes, or because a blocked write()
- // to a socket, pipe, or similar was interrupted by a signal handler
- // after it had transferred some, but before it had transferred all
- // of the requested bytes. In the event of a partial write, the
- // caller can make another write() call to transfer the remaining
- // bytes. The subsequent call will either transfer further bytes or
- // may result in an error (e.g., if the disk is now full).
- n, err = unix.Write(fd, data[written:])
- }
- errClose := unix.Close(fd)
- if err != nil {
- return err // Close() error is ignored, we have a better one.
- }
- return errClose
-}
-
-func (w *linuxWriter) removeFile(p string, size uint64) error {
- if w.fileCounterEnabled {
- w.fileGuard.Lock(p)
- defer w.fileGuard.Unlock(p)
-
- if size == 0 {
- var stat unix.Stat_t
- err := unix.Stat(p, &stat)
- if err != nil {
- if err == unix.ENOENT {
- return logicerr.Wrap(new(apistatus.ObjectNotFound))
- }
- return err
- }
- size = uint64(stat.Size)
- }
- }
-
- err := unix.Unlink(p)
- if err != nil && err == unix.ENOENT {
- return logicerr.Wrap(new(apistatus.ObjectNotFound))
- }
- if err == nil {
- w.fileCounter.Dec(size)
- }
- return err
-}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go
deleted file mode 100644
index 7fae2e695..000000000
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-//go:build linux && integration
-
-package fstree
-
-import (
- "context"
- "errors"
- "os"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "github.com/stretchr/testify/require"
- "golang.org/x/sys/unix"
-)
-
-func TestENOSPC(t *testing.T) {
- dir, err := os.MkdirTemp(t.TempDir(), "ramdisk")
- require.NoError(t, err)
-
- f, err := os.CreateTemp(t.TempDir(), "ramdisk_*")
- require.NoError(t, err)
-
- err = unix.Mount(f.Name(), dir, "tmpfs", 0, "size=1M")
- if errors.Is(err, unix.EPERM) {
- t.Skipf("skip size tests: no permission to mount: %v", err)
- return
- }
- require.NoError(t, err)
- defer func() {
- require.NoError(t, unix.Unmount(dir, 0))
- }()
-
- fst := New(WithPath(dir), WithDepth(1))
- require.NoError(t, fst.Open(mode.ComponentReadWrite))
- require.NoError(t, fst.Init())
-
- _, err = fst.Put(context.Background(), common.PutPrm{
- RawData: make([]byte, 10<<20),
- })
- require.ErrorIs(t, err, common.ErrNoSpace)
-}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_specific.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_specific.go
deleted file mode 100644
index 67052d947..000000000
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_specific.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !linux || fstree_generic
-
-package fstree
-
-import (
- "io/fs"
-)
-
-func newSpecificWriteData(_ FileCounter, _ string, _ fs.FileMode, _ bool) writer {
- return nil
-}
diff --git a/pkg/local_object_storage/blobstor/fstree/metrics.go b/pkg/local_object_storage/blobstor/fstree/metrics.go
index 4241beec9..ca6a54975 100644
--- a/pkg/local_object_storage/blobstor/fstree/metrics.go
+++ b/pkg/local_object_storage/blobstor/fstree/metrics.go
@@ -1,37 +1,29 @@
package fstree
-import (
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-)
+import "time"
type Metrics interface {
SetParentID(parentID string)
- SetMode(mode mode.ComponentMode)
+ SetMode(readOnly bool)
Close()
Iterate(d time.Duration, success bool)
- IterateInfo(d time.Duration, success bool)
Delete(d time.Duration, success bool)
Exists(d time.Duration, success bool)
Put(d time.Duration, size int, success bool)
Get(d time.Duration, size int, success bool)
GetRange(d time.Duration, size int, success bool)
- ObjectsCount(d time.Duration, success bool)
}
type noopMetrics struct{}
func (m *noopMetrics) SetParentID(string) {}
-func (m *noopMetrics) SetMode(mode.ComponentMode) {}
+func (m *noopMetrics) SetMode(bool) {}
func (m *noopMetrics) Close() {}
func (m *noopMetrics) Iterate(time.Duration, bool) {}
-func (m *noopMetrics) IterateInfo(time.Duration, bool) {}
func (m *noopMetrics) Delete(time.Duration, bool) {}
func (m *noopMetrics) Exists(time.Duration, bool) {}
func (m *noopMetrics) Put(time.Duration, int, bool) {}
func (m *noopMetrics) Get(time.Duration, int, bool) {}
func (m *noopMetrics) GetRange(time.Duration, int, bool) {}
-func (m *noopMetrics) ObjectsCount(time.Duration, bool) {}
diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go
index 6f2ac87e1..21d46ac4d 100644
--- a/pkg/local_object_storage/blobstor/fstree/option.go
+++ b/pkg/local_object_storage/blobstor/fstree/option.go
@@ -3,7 +3,7 @@ package fstree
import (
"io/fs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
)
type Option func(*FSTree)
@@ -46,12 +46,8 @@ func WithMetrics(m Metrics) Option {
func WithFileCounter(c FileCounter) Option {
return func(f *FSTree) {
+ f.fileCounterEnabled = true
f.fileCounter = c
- }
-}
-
-func WithLogger(l *logger.Logger) Option {
- return func(f *FSTree) {
- f.log = l
+ f.fileGuard = utilSync.NewKeyLocker[string]()
}
}
diff --git a/pkg/local_object_storage/blobstor/generic_test.go b/pkg/local_object_storage/blobstor/generic_test.go
index b58ab8a68..204bdf61d 100644
--- a/pkg/local_object_storage/blobstor/generic_test.go
+++ b/pkg/local_object_storage/blobstor/generic_test.go
@@ -7,6 +7,7 @@ import (
)
func TestGeneric(t *testing.T) {
+
newMetabase := func(t *testing.T) storagetest.Component {
return New(
WithStorages(defaultStorages(t.TempDir(), 128)))
diff --git a/pkg/local_object_storage/blobstor/get.go b/pkg/local_object_storage/blobstor/get.go
index d00ef2f21..00ed18d7b 100644
--- a/pkg/local_object_storage/blobstor/get.go
+++ b/pkg/local_object_storage/blobstor/get.go
@@ -18,7 +18,9 @@ import (
// If the descriptor is present, only one sub-storage is tried,
// Otherwise, each sub-storage is tried in order.
func (b *BlobStor) Get(ctx context.Context, prm common.GetPrm) (res common.GetRes, err error) {
- startedAt := time.Now()
+ var (
+ startedAt = time.Now()
+ )
defer func() {
b.metrics.Get(time.Since(startedAt), len(res.RawData), err == nil, prm.StorageID != nil)
}()
diff --git a/pkg/local_object_storage/blobstor/get_range.go b/pkg/local_object_storage/blobstor/get_range.go
index 9bded4720..50abd19fa 100644
--- a/pkg/local_object_storage/blobstor/get_range.go
+++ b/pkg/local_object_storage/blobstor/get_range.go
@@ -19,7 +19,9 @@ import (
// If the descriptor is present, only one sub-storage is tried,
// Otherwise, each sub-storage is tried in order.
func (b *BlobStor) GetRange(ctx context.Context, prm common.GetRangePrm) (res common.GetRangeRes, err error) {
- startedAt := time.Now()
+ var (
+ startedAt = time.Now()
+ )
defer func() {
b.metrics.GetRange(time.Since(startedAt), len(res.Data), err == nil, prm.StorageID != nil)
}()
diff --git a/pkg/local_object_storage/blobstor/info.go b/pkg/local_object_storage/blobstor/info.go
index c1c47f3bb..2fd62af81 100644
--- a/pkg/local_object_storage/blobstor/info.go
+++ b/pkg/local_object_storage/blobstor/info.go
@@ -1,14 +1,5 @@
package blobstor
-import (
- "context"
- "sync/atomic"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "golang.org/x/sync/errgroup"
-)
-
// DumpInfo returns information about blob stor.
func (b *BlobStor) DumpInfo() Info {
b.modeMtx.RLock()
@@ -24,38 +15,3 @@ func (b *BlobStor) DumpInfo() Info {
SubStorages: sub,
}
}
-
-// ObjectsCount returns Blobstore's total objects count.
-func (b *BlobStor) ObjectsCount(ctx context.Context) (uint64, error) {
- var err error
- startedAt := time.Now()
- defer func() {
- b.metrics.ObjectsCount(time.Since(startedAt), err == nil)
- }()
-
- ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.ObjectsCount")
- defer span.End()
-
- b.modeMtx.RLock()
- defer b.modeMtx.RUnlock()
-
- var result atomic.Uint64
-
- eg, egCtx := errgroup.WithContext(ctx)
- for i := range b.storage {
- eg.Go(func() error {
- v, e := b.storage[i].Storage.ObjectsCount(egCtx)
- if e != nil {
- return e
- }
- result.Add(v)
- return nil
- })
- }
-
- if err = eg.Wait(); err != nil {
- return 0, err
- }
-
- return result.Load(), nil
-}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
index 5d14a9a3a..c08e39bf1 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
@@ -27,21 +27,21 @@ type objectDesc struct {
storageID []byte
}
-func TestAll(t *testing.T, cons Constructor, minSize, maxSize uint64) {
+func TestAll(t *testing.T, cons Constructor, min, max uint64) {
t.Run("get", func(t *testing.T) {
- TestGet(t, cons, minSize, maxSize)
+ TestGet(t, cons, min, max)
})
t.Run("get range", func(t *testing.T) {
- TestGetRange(t, cons, minSize, maxSize)
+ TestGetRange(t, cons, min, max)
})
t.Run("delete", func(t *testing.T) {
- TestDelete(t, cons, minSize, maxSize)
+ TestDelete(t, cons, min, max)
})
t.Run("exists", func(t *testing.T) {
- TestExists(t, cons, minSize, maxSize)
+ TestExists(t, cons, min, max)
})
t.Run("iterate", func(t *testing.T) {
- TestIterate(t, cons, minSize, maxSize)
+ TestIterate(t, cons, min, max)
})
}
@@ -51,12 +51,12 @@ func TestInfo(t *testing.T, cons Constructor, expectedType string, expectedPath
require.Equal(t, expectedPath, s.Path())
}
-func prepare(t *testing.T, count int, s common.Storage, minSize, maxSize uint64) []objectDesc {
+func prepare(t *testing.T, count int, s common.Storage, min, max uint64) []objectDesc {
objects := make([]objectDesc, count)
r := mrand.New(mrand.NewSource(0))
for i := range objects {
- objects[i].obj = NewObject(minSize + uint64(r.Intn(int(maxSize-minSize+1)))) // not too large
+ objects[i].obj = NewObject(min + uint64(r.Intn(int(max-min+1)))) // not too large
objects[i].addr = objectCore.AddressOf(objects[i].obj)
raw, err := objects[i].obj.Marshal()
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
index b8e88f84a..96d54dec3 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
@@ -7,21 +7,20 @@ import (
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
)
// TestControl checks correctness of a read-only mode.
// cons must return a storage which is NOT opened.
-func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) {
+func TestControl(t *testing.T, cons Constructor, min, max uint64) {
s := cons(t)
- require.NoError(t, s.Open(mode.ComponentReadWrite))
+ require.NoError(t, s.Open(false))
require.NoError(t, s.Init())
- objects := prepare(t, 10, s, minSize, maxSize)
- require.NoError(t, s.Close(context.Background()))
+ objects := prepare(t, 10, s, min, max)
+ require.NoError(t, s.Close())
- require.NoError(t, s.Open(mode.ComponentReadOnly))
+ require.NoError(t, s.Open(true))
for i := range objects {
var prm common.GetPrm
prm.Address = objects[i].addr
@@ -34,7 +33,7 @@ func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) {
t.Run("put fails", func(t *testing.T) {
var prm common.PutPrm
- prm.Object = NewObject(minSize + uint64(rand.Intn(int(maxSize-minSize+1))))
+ prm.Object = NewObject(min + uint64(rand.Intn(int(max-min+1))))
prm.Address = objectCore.AddressOf(prm.Object)
_, err := s.Put(context.Background(), prm)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
index 3a163f6b1..0bfac2772 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
@@ -5,19 +5,18 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
-func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) {
+func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
s := cons(t)
- require.NoError(t, s.Open(mode.ComponentReadWrite))
+ require.NoError(t, s.Open(false))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ t.Cleanup(func() { require.NoError(t, s.Close()) })
- objects := prepare(t, 4, s, minSize, maxSize)
+ objects := prepare(t, 4, s, min, max)
t.Run("delete non-existent", func(t *testing.T) {
var prm common.DeletePrm
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
index f34fe5f97..99f6a79e8 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
@@ -5,18 +5,17 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
-func TestExists(t *testing.T, cons Constructor, minSize, maxSize uint64) {
+func TestExists(t *testing.T, cons Constructor, min, max uint64) {
s := cons(t)
- require.NoError(t, s.Open(mode.ComponentReadWrite))
+ require.NoError(t, s.Open(false))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ t.Cleanup(func() { require.NoError(t, s.Close()) })
- objects := prepare(t, 1, s, minSize, maxSize)
+ objects := prepare(t, 1, s, min, max)
t.Run("missing object", func(t *testing.T) {
prm := common.ExistsPrm{Address: oidtest.Address()}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
index af0f4b45d..9a7ebed09 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
@@ -5,19 +5,18 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
-func TestGet(t *testing.T, cons Constructor, minSize, maxSize uint64) {
+func TestGet(t *testing.T, cons Constructor, min, max uint64) {
s := cons(t)
- require.NoError(t, s.Open(mode.ComponentReadWrite))
+ require.NoError(t, s.Open(false))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ t.Cleanup(func() { require.NoError(t, s.Close()) })
- objects := prepare(t, 2, s, minSize, maxSize)
+ objects := prepare(t, 2, s, min, max)
t.Run("missing object", func(t *testing.T) {
gPrm := common.GetPrm{Address: oidtest.Address()}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
index 13032048c..1a9da0726 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
@@ -6,20 +6,19 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
-func TestGetRange(t *testing.T, cons Constructor, minSize, maxSize uint64) {
+func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
s := cons(t)
- require.NoError(t, s.Open(mode.ComponentReadWrite))
+ require.NoError(t, s.Open(false))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ t.Cleanup(func() { require.NoError(t, s.Close()) })
- objects := prepare(t, 1, s, minSize, maxSize)
+ objects := prepare(t, 1, s, min, max)
t.Run("missing object", func(t *testing.T) {
gPrm := common.GetRangePrm{Address: oidtest.Address()}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
index d54c54f59..34622c857 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
@@ -3,21 +3,20 @@ package blobstortest
import (
"context"
"errors"
- "slices"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
)
-func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
+func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
s := cons(t)
- require.NoError(t, s.Open(mode.ComponentReadWrite))
+ require.NoError(t, s.Open(false))
require.NoError(t, s.Init())
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+ t.Cleanup(func() { require.NoError(t, s.Close()) })
- objects := prepare(t, 10, s, minSize, maxSize)
+ objects := prepare(t, 10, s, min, max)
// Delete random object to ensure it is not iterated over.
const delID = 2
@@ -27,10 +26,12 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
_, err := s.Delete(context.Background(), delPrm)
require.NoError(t, err)
- objects = slices.Delete(objects, delID, delID+1)
+ objects = append(objects[:delID], objects[delID+1:]...)
runTestNormalHandler(t, s, objects)
+ runTestLazyHandler(t, s, objects)
+
runTestIgnoreLogicalErrors(t, s, objects)
}
@@ -50,7 +51,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc)
_, err := s.Iterate(context.Background(), iterPrm)
require.NoError(t, err)
- require.Len(t, objects, len(seen))
+ require.Equal(t, len(objects), len(seen))
for i := range objects {
d, ok := seen[objects[i].addr.String()]
require.True(t, ok)
@@ -61,12 +62,36 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc)
})
}
+func runTestLazyHandler(t *testing.T, s common.Storage, objects []objectDesc) {
+ t.Run("lazy handler", func(t *testing.T) {
+ seen := make(map[string]func() ([]byte, error))
+
+ var iterPrm common.IteratePrm
+ iterPrm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error {
+ seen[addr.String()] = f
+ return nil
+ }
+
+ _, err := s.Iterate(context.Background(), iterPrm)
+ require.NoError(t, err)
+ require.Equal(t, len(objects), len(seen))
+ for i := range objects {
+ f, ok := seen[objects[i].addr.String()]
+ require.True(t, ok)
+
+ data, err := f()
+ require.NoError(t, err)
+ require.Equal(t, objects[i].raw, data)
+ }
+ })
+}
+
func runTestIgnoreLogicalErrors(t *testing.T, s common.Storage, objects []objectDesc) {
t.Run("ignore errors doesn't work for logical errors", func(t *testing.T) {
seen := make(map[string]objectDesc)
var n int
- logicErr := errors.New("logic error")
+ var logicErr = errors.New("logic error")
var iterPrm common.IteratePrm
iterPrm.IgnoreErrors = true
iterPrm.Handler = func(elem common.IterationElement) error {
@@ -84,7 +109,7 @@ func runTestIgnoreLogicalErrors(t *testing.T, s common.Storage, objects []object
_, err := s.Iterate(context.Background(), iterPrm)
require.Equal(t, err, logicErr)
- require.Len(t, seen, len(objects)/2)
+ require.Equal(t, len(objects)/2, len(seen))
for i := range objects {
d, ok := seen[objects[i].addr.String()]
if ok {
diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go
index ff1aa9d64..5a41e4c4f 100644
--- a/pkg/local_object_storage/blobstor/iterate.go
+++ b/pkg/local_object_storage/blobstor/iterate.go
@@ -40,14 +40,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I
for i := range b.storage {
_, err := b.storage[i].Storage.Iterate(ctx, prm)
- if err != nil {
- if prm.IgnoreErrors {
- b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
- zap.String("storage_path", b.storage[i].Storage.Path()),
- zap.String("storage_type", b.storage[i].Storage.Type()),
- zap.Error(err))
- continue
- }
+ if err != nil && !prm.IgnoreErrors {
return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err)
}
}
@@ -64,6 +57,12 @@ func IterateBinaryObjects(ctx context.Context, blz *BlobStor, f func(addr oid.Ad
return f(elem.Address, elem.ObjectData, elem.StorageID)
}
prm.IgnoreErrors = true
+ prm.ErrorHandler = func(addr oid.Address, err error) error {
+ blz.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+ zap.Stringer("address", addr),
+ zap.String("err", err.Error()))
+ return nil
+ }
_, err := blz.Iterate(ctx, prm)
diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go
index 2786321a8..ef3fda991 100644
--- a/pkg/local_object_storage/blobstor/iterate_test.go
+++ b/pkg/local_object_storage/blobstor/iterate_test.go
@@ -3,15 +3,10 @@ package blobstor
import (
"context"
"encoding/binary"
- "errors"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -25,20 +20,18 @@ func TestIterateObjects(t *testing.T) {
// create BlobStor instance
blobStor := New(
WithStorages(defaultStorages(p, smalSz)),
- WithCompression(compression.Config{
- Enabled: true,
- }),
+ WithCompressObjects(true),
)
defer os.RemoveAll(p)
// open Blobstor
- require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, blobStor.Open(context.Background(), false))
// initialize Blobstor
- require.NoError(t, blobStor.Init(context.Background()))
+ require.NoError(t, blobStor.Init())
- defer blobStor.Close(context.Background())
+ defer blobStor.Close()
const objNum = 5
@@ -50,7 +43,7 @@ func TestIterateObjects(t *testing.T) {
mObjs := make(map[string]addrData)
- for i := range uint64(objNum) {
+ for i := uint64(0); i < objNum; i++ {
sz := smalSz
big := i < objNum/2
@@ -96,60 +89,117 @@ func TestIterateObjects(t *testing.T) {
}
func TestIterate_IgnoreErrors(t *testing.T) {
- ctx := context.Background()
-
- myErr := errors.New("unique error")
- nopIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, nil }
- panicIter := func(common.IteratePrm) (common.IterateRes, error) { panic("unreachable") }
- errIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, myErr }
-
- var s1iter, s2iter func(common.IteratePrm) (common.IterateRes, error)
- st1 := teststore.New(
- teststore.WithSubstorage(memstore.New()),
- teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
- return s1iter(prm)
- }))
- st2 := teststore.New(
- teststore.WithSubstorage(memstore.New()),
- teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
- return s2iter(prm)
- }))
-
- bsOpts := []Option{WithStorages([]SubStorage{
- {Storage: st1},
- {Storage: st2},
- })}
- bs := New(bsOpts...)
- require.NoError(t, bs.Open(ctx, mode.ReadWrite))
- require.NoError(t, bs.Init(ctx))
-
- nopHandler := func(e common.IterationElement) error {
- return nil
- }
-
- t.Run("no errors", func(t *testing.T) {
- s1iter = nopIter
- s2iter = nopIter
- _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
- require.NoError(t, err)
- })
- t.Run("error in the first sub storage, the second one is not iterated over", func(t *testing.T) {
- s1iter = errIter
- s2iter = panicIter
- _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
- require.ErrorIs(t, err, myErr)
- })
-
- t.Run("ignore errors, storage 1", func(t *testing.T) {
- s1iter = errIter
- s2iter = nopIter
- _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
- require.NoError(t, err)
- })
- t.Run("ignore errors, storage 2", func(t *testing.T) {
- s1iter = nopIter
- s2iter = errIter
- _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
- require.NoError(t, err)
- })
+ t.Skip()
+ // dir := t.TempDir()
+ //
+ // const (
+ // smallSize = 512
+ // objCount = 5
+ // )
+ // bsOpts := []Option{
+ // WithCompressObjects(true),
+ // WithRootPath(dir),
+ // WithSmallSizeLimit(smallSize * 2), // + header
+ // WithBlobovniczaOpenedCacheSize(1),
+ // WithBlobovniczaShallowWidth(1),
+ // WithBlobovniczaShallowDepth(1)}
+ // bs := New(bsOpts...)
+ // require.NoError(t, bs.Open(false))
+ // require.NoError(t, bs.Init())
+ //
+ // addrs := make([]oid.Address, objCount)
+ // for i := range addrs {
+ // addrs[i] = oidtest.Address()
+ //
+ // obj := object.New()
+ // obj.SetContainerID(addrs[i].Container())
+ // obj.SetID(addrs[i].Object())
+ // obj.SetPayload(make([]byte, smallSize<<(i%2)))
+ //
+ // objData, err := obj.Marshal()
+ // require.NoError(t, err)
+ //
+ // _, err = bs.PutRaw(addrs[i], objData, true)
+ // require.NoError(t, err)
+ // }
+ //
+ // // Construct corrupted compressed object.
+ // buf := bytes.NewBuffer(nil)
+ // badObject := make([]byte, smallSize/2+1)
+ // enc, err := zstd.NewWriter(buf)
+ // require.NoError(t, err)
+ // rawData := enc.EncodeAll(badObject, nil)
+ // for i := 4; /* magic size */ i < len(rawData); i += 2 {
+ // rawData[i] ^= 0xFF
+ // }
+ // // Will be put uncompressed but fetched as compressed because of magic.
+ // _, err = bs.PutRaw(oidtest.Address(), rawData, false)
+ // require.NoError(t, err)
+ // require.NoError(t, bs.fsTree.Put(oidtest.Address(), rawData))
+ //
+ // require.NoError(t, bs.Close())
+ //
+ // // Increase width to have blobovnicza which is definitely empty.
+ // b := New(append(bsOpts, WithBlobovniczaShallowWidth(2))...)
+ // require.NoError(t, b.Open(false))
+ // require.NoError(t, b.Init())
+ //
+ // var p string
+ // for i := 0; i < 2; i++ {
+ // bp := filepath.Join(bs.rootPath, "1", strconv.FormatUint(uint64(i), 10))
+ // if _, ok := bs.blobovniczas.opened.Get(bp); !ok {
+ // p = bp
+ // break
+ // }
+ // }
+ // require.NotEqual(t, "", p, "expected to not have at least 1 blobovnicza in cache")
+ // require.NoError(t, os.Chmod(p, 0))
+ //
+ // require.NoError(t, b.Close())
+ // require.NoError(t, bs.Open(false))
+ // require.NoError(t, bs.Init())
+ //
+ // var prm IteratePrm
+ // prm.SetIterationHandler(func(e IterationElement) error {
+ // return nil
+ // })
+ // _, err = bs.Iterate(prm)
+ // require.Error(t, err)
+ //
+ // prm.IgnoreErrors()
+ //
+ // t.Run("skip invalid objects", func(t *testing.T) {
+ // actual := make([]oid.Address, 0, len(addrs))
+ // prm.SetIterationHandler(func(e IterationElement) error {
+ // obj := object.New()
+ // err := obj.Unmarshal(e.data)
+ // if err != nil {
+ // return err
+ // }
+ //
+ // var addr oid.Address
+ // cnr, _ := obj.ContainerID()
+ // addr.SetContainer(cnr)
+ // id, _ := obj.ID()
+ // addr.SetObject(id)
+ // actual = append(actual, addr)
+ // return nil
+ // })
+ //
+ // _, err := bs.Iterate(prm)
+ // require.NoError(t, err)
+ // require.ElementsMatch(t, addrs, actual)
+ // })
+ // t.Run("return errors from handler", func(t *testing.T) {
+ // n := 0
+ // expectedErr := errors.New("expected error")
+ // prm.SetIterationHandler(func(e IterationElement) error {
+ // if n++; n == objCount/2 {
+ // return expectedErr
+ // }
+ // return nil
+ // })
+ // _, err := bs.Iterate(prm)
+ // require.ErrorIs(t, err, expectedErr)
+ // })
}
diff --git a/pkg/local_object_storage/blobstor/logger.go b/pkg/local_object_storage/blobstor/logger.go
index 070b1eac9..f201f18d7 100644
--- a/pkg/local_object_storage/blobstor/logger.go
+++ b/pkg/local_object_storage/blobstor/logger.go
@@ -1,20 +1,16 @@
package blobstor
import (
- "context"
-
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
-const (
- deleteOp = "DELETE"
- putOp = "PUT"
-)
+const deleteOp = "DELETE"
+const putOp = "PUT"
-func logOp(ctx context.Context, l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
- storagelog.Write(ctx, l,
+func logOp(l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
+ storagelog.Write(l,
storagelog.AddressField(addr),
storagelog.OpField(op),
storagelog.StorageTypeField(typ),
diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go
index 3df96a1c3..7f6e85a16 100644
--- a/pkg/local_object_storage/blobstor/memstore/control.go
+++ b/pkg/local_object_storage/blobstor/memstore/control.go
@@ -1,22 +1,17 @@
package memstore
-import (
- "context"
+import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-)
-
-func (s *memstoreImpl) Open(mod mode.ComponentMode) error {
- s.readOnly = mod.ReadOnly()
+func (s *memstoreImpl) Open(readOnly bool) error {
+ s.readOnly = readOnly
return nil
}
-func (s *memstoreImpl) Init() error { return nil }
-func (s *memstoreImpl) Close(context.Context) error { return nil }
-func (s *memstoreImpl) Type() string { return Type }
-func (s *memstoreImpl) Path() string { return s.rootPath }
-func (s *memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc }
-func (s *memstoreImpl) Compressor() *compression.Compressor { return s.compression }
-func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {}
-func (s *memstoreImpl) SetParentID(string) {}
+func (s *memstoreImpl) Init() error { return nil }
+func (s *memstoreImpl) Close() error { return nil }
+func (s *memstoreImpl) Type() string { return Type }
+func (s *memstoreImpl) Path() string { return s.rootPath }
+func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
+func (s *memstoreImpl) Compressor() *compression.Config { return s.compression }
+func (s *memstoreImpl) SetReportErrorFunc(f func(string, error)) { s.reportError = f }
+func (s *memstoreImpl) SetParentID(string) {}
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go
index 7ef7e37a4..9428f457f 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore.go
@@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes,
// Decompress the data.
var err error
if data, err = s.compression.Decompress(data); err != nil {
- return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
+ return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
}
// Unmarshal the SDK object.
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
- return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
+ return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil
@@ -133,15 +133,18 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common
elem := common.IterationElement{
ObjectData: v,
}
- if err := elem.Address.DecodeString(k); err != nil {
+ if err := elem.Address.DecodeString(string(k)); err != nil {
if req.IgnoreErrors {
continue
}
- return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err))
+ return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, string(k), err))
}
var err error
if elem.ObjectData, err = s.compression.Decompress(elem.ObjectData); err != nil {
if req.IgnoreErrors {
+ if req.ErrorHandler != nil {
+ return common.IterateRes{}, req.ErrorHandler(elem.Address, err)
+ }
continue
}
return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decompressing data for address %q: %v", s, elem.Address.String(), err))
@@ -151,6 +154,10 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common
if err := req.Handler(elem); err != nil {
return common.IterateRes{}, err
}
+ case req.LazyHandler != nil:
+ if err := req.LazyHandler(elem.Address, func() ([]byte, error) { return elem.ObjectData, nil }); err != nil {
+ return common.IterateRes{}, err
+ }
default:
if !req.IgnoreErrors {
return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) no Handler or LazyHandler set for IteratePrm", s))
@@ -159,14 +166,3 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common
}
return common.IterateRes{}, nil
}
-
-func (s *memstoreImpl) Rebuild(_ context.Context, _ common.RebuildPrm) (common.RebuildRes, error) {
- return common.RebuildRes{}, nil
-}
-
-func (s *memstoreImpl) ObjectsCount(_ context.Context) (uint64, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
-
- return uint64(len(s.objs)), nil
-}
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
index f904d4232..eaa2a4b61 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
@@ -7,7 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
)
@@ -15,9 +15,10 @@ import (
func TestSimpleLifecycle(t *testing.T) {
s := New(
WithRootPath("memstore"),
+ WithLogger(test.NewLogger(t, true)),
)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
- require.NoError(t, s.Open(mode.ComponentReadWrite))
+ t.Cleanup(func() { _ = s.Close() })
+ require.NoError(t, s.Open(false))
require.NoError(t, s.Init())
obj := blobstortest.NewObject(1024)
diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go
index 7605af4e5..3d67b1e9c 100644
--- a/pkg/local_object_storage/blobstor/memstore/option.go
+++ b/pkg/local_object_storage/blobstor/memstore/option.go
@@ -2,20 +2,33 @@ package memstore
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
)
type cfg struct {
+ log *logger.Logger
rootPath string
readOnly bool
- compression *compression.Compressor
+ compression *compression.Config
+ reportError func(string, error)
}
func defaultConfig() *cfg {
- return &cfg{}
+ return &cfg{
+ log: &logger.Logger{Logger: zap.L()},
+ reportError: func(string, error) {},
+ }
}
type Option func(*cfg)
+func WithLogger(l *logger.Logger) Option {
+ return func(c *cfg) {
+ c.log = l
+ }
+}
+
func WithRootPath(p string) Option {
return func(c *cfg) {
c.rootPath = p
diff --git a/pkg/local_object_storage/blobstor/metrics.go b/pkg/local_object_storage/blobstor/metrics.go
index aadc237af..4a7b40092 100644
--- a/pkg/local_object_storage/blobstor/metrics.go
+++ b/pkg/local_object_storage/blobstor/metrics.go
@@ -13,7 +13,6 @@ type Metrics interface {
Get(d time.Duration, size int, success, withStorageID bool)
Iterate(d time.Duration, success bool)
Put(d time.Duration, size int, success bool)
- ObjectsCount(d time.Duration, success bool)
}
type noopMetrics struct{}
@@ -27,4 +26,3 @@ func (m *noopMetrics) GetRange(time.Duration, int, bool, bool) {}
func (m *noopMetrics) Get(time.Duration, int, bool, bool) {}
func (m *noopMetrics) Iterate(time.Duration, bool) {}
func (m *noopMetrics) Put(time.Duration, int, bool) {}
-func (m *noopMetrics) ObjectsCount(time.Duration, bool) {}
diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go
index 80268fa7a..2f4473bd8 100644
--- a/pkg/local_object_storage/blobstor/mode.go
+++ b/pkg/local_object_storage/blobstor/mode.go
@@ -8,7 +8,7 @@ import (
)
// SetMode sets the blobstor mode of operation.
-func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
+func (b *BlobStor) SetMode(m mode.Mode) error {
b.modeMtx.Lock()
defer b.modeMtx.Unlock()
@@ -20,17 +20,16 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
return nil
}
- err := b.Close(ctx)
+ err := b.Close()
if err == nil {
- if err = b.openBlobStor(ctx, m); err == nil {
- err = b.Init(ctx)
+ if err = b.Open(context.TODO(), m.ReadOnly()); err == nil {
+ err = b.Init()
}
}
if err != nil {
- return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
+ return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
}
b.mode = m
- b.metrics.SetMode(m.ReadOnly())
return nil
}
diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go
index 64e3c8da1..c773ea0ee 100644
--- a/pkg/local_object_storage/blobstor/perf_test.go
+++ b/pkg/local_object_storage/blobstor/perf_test.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
@@ -23,9 +22,13 @@ type storage struct {
func (s storage) open(b *testing.B) common.Storage {
st := s.create(b.TempDir())
- require.NoError(b, st.Open(mode.ComponentReadWrite))
+ require.NoError(b, st.Open(false))
require.NoError(b, st.Init())
+ b.Cleanup(func() {
+ require.NoError(b, st.Close())
+ })
+
return st
}
@@ -74,7 +77,6 @@ var storages = []storage{
desc: "blobovniczatree",
create: func(dir string) common.Storage {
return blobovniczatree.NewBlobovniczaTree(
- context.Background(),
blobovniczatree.WithRootPath(dir),
)
},
@@ -106,11 +108,10 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
st := stEntry.open(b)
- defer func() { require.NoError(b, st.Close(context.Background())) }()
// Fill database
var errG errgroup.Group
- for range tt.size {
+ for i := 0; i < tt.size; i++ {
obj := objGen.Next()
addr := testutil.AddressFromObject(b, obj)
errG.Go(func() error {
@@ -161,7 +162,6 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
gen := genEntry.create()
st := stEntry.open(b)
- defer func() { require.NoError(b, st.Close(context.Background())) }()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
@@ -200,10 +200,9 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
st := stEntry.open(b)
- defer func() { require.NoError(b, st.Close(context.Background())) }()
// Fill database
- for range tt.size {
+ for i := 0; i < tt.size; i++ {
obj := objGen.Next()
addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go
index fe9c109dd..1adae303d 100644
--- a/pkg/local_object_storage/blobstor/put.go
+++ b/pkg/local_object_storage/blobstor/put.go
@@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
// marshal object
data, err := prm.Object.Marshal()
if err != nil {
- return common.PutRes{}, fmt.Errorf("marshal the object: %w", err)
+ return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err)
}
prm.RawData = data
}
@@ -63,7 +63,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
res, err := b.storage[i].Storage.Put(ctx, prm)
if err == nil {
success = true
- logOp(ctx, b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
+ logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
}
return res, err
}
diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go
deleted file mode 100644
index f28816555..000000000
--- a/pkg/local_object_storage/blobstor/rebuild.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package blobstor
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-type StorageIDUpdate interface {
- UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
-}
-
-func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter common.RebuildLimiter, fillPercent int) error {
- var summary common.RebuildRes
- var rErr error
- for _, storage := range b.storage {
- res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{
- MetaStorage: upd,
- Limiter: concLimiter,
- FillPercent: fillPercent,
- })
- summary.FilesRemoved += res.FilesRemoved
- summary.ObjectsMoved += res.ObjectsMoved
- if err != nil {
- b.log.Error(ctx, logs.BlobstorRebuildFailedToRebuildStorages,
- zap.String("failed_storage_path", storage.Storage.Path()),
- zap.String("failed_storage_type", storage.Storage.Type()),
- zap.Error(err))
- rErr = err
- break
- }
- }
- b.log.Info(ctx, logs.BlobstorRebuildRebuildStoragesCompleted,
- zap.Bool("success", rErr == nil),
- zap.Uint64("total_files_removed", summary.FilesRemoved),
- zap.Uint64("total_objects_moved", summary.ObjectsMoved))
- return rErr
-}
diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go
index 3a38ecf82..b897832cf 100644
--- a/pkg/local_object_storage/blobstor/teststore/option.go
+++ b/pkg/local_object_storage/blobstor/teststore/option.go
@@ -1,25 +1,22 @@
package teststore
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
type cfg struct {
st common.Storage
overrides struct {
- Open func(mode mode.ComponentMode) error
+ Open func(readOnly bool) error
Init func() error
Close func() error
Type func() string
Path func() string
- SetCompressor func(cc *compression.Compressor)
- Compressor func() *compression.Compressor
- SetReportErrorFunc func(f func(context.Context, string, error))
+ SetCompressor func(cc *compression.Config)
+ Compressor func() *compression.Config
+ SetReportErrorFunc func(f func(string, error))
Get func(common.GetPrm) (common.GetRes, error)
GetRange func(common.GetRangePrm) (common.GetRangeRes, error)
@@ -38,22 +35,22 @@ func WithSubstorage(st common.Storage) Option {
}
}
-func WithOpen(f func(mode.ComponentMode) error) Option { return func(c *cfg) { c.overrides.Open = f } }
-func WithInit(f func() error) Option { return func(c *cfg) { c.overrides.Init = f } }
-func WithClose(f func() error) Option { return func(c *cfg) { c.overrides.Close = f } }
+func WithOpen(f func(bool) error) Option { return func(c *cfg) { c.overrides.Open = f } }
+func WithInit(f func() error) Option { return func(c *cfg) { c.overrides.Init = f } }
+func WithClose(f func() error) Option { return func(c *cfg) { c.overrides.Close = f } }
func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } }
func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } }
-func WithSetCompressor(f func(*compression.Compressor)) Option {
+func WithSetCompressor(f func(*compression.Config)) Option {
return func(c *cfg) { c.overrides.SetCompressor = f }
}
-func WithCompressor(f func() *compression.Compressor) Option {
+func WithCompressor(f func() *compression.Config) Option {
return func(c *cfg) { c.overrides.Compressor = f }
}
-func WithReportErrorFunc(f func(func(context.Context, string, error))) Option {
+func WithReportErrorFunc(f func(func(string, error))) Option {
return func(c *cfg) { c.overrides.SetReportErrorFunc = f }
}
diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go
index 190b6a876..3e5b21251 100644
--- a/pkg/local_object_storage/blobstor/teststore/teststore.go
+++ b/pkg/local_object_storage/blobstor/teststore/teststore.go
@@ -20,7 +20,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
// TestStore is a common.Storage implementation for testing/mocking purposes.
@@ -51,16 +50,16 @@ func (s *TestStore) SetOption(opt Option) {
opt(s.cfg)
}
-func (s *TestStore) Open(mod mode.ComponentMode) error {
+func (s *TestStore) Open(readOnly bool) error {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Open != nil:
- return s.overrides.Open(mod)
+ return s.overrides.Open(readOnly)
case s.st != nil:
- return s.st.Open(mod)
+ return s.st.Open(readOnly)
default:
- panic(fmt.Sprintf("unexpected storage call: Open(%v)", mod.String()))
+ panic(fmt.Sprintf("unexpected storage call: Open(%v)", readOnly))
}
}
@@ -77,14 +76,14 @@ func (s *TestStore) Init() error {
}
}
-func (s *TestStore) Close(ctx context.Context) error {
+func (s *TestStore) Close() error {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Close != nil:
return s.overrides.Close()
case s.st != nil:
- return s.st.Close(ctx)
+ return s.st.Close()
default:
panic("unexpected storage call: Close()")
}
@@ -116,7 +115,7 @@ func (s *TestStore) Path() string {
}
}
-func (s *TestStore) SetCompressor(cc *compression.Compressor) {
+func (s *TestStore) SetCompressor(cc *compression.Config) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
@@ -129,7 +128,7 @@ func (s *TestStore) SetCompressor(cc *compression.Compressor) {
}
}
-func (s *TestStore) Compressor() *compression.Compressor {
+func (s *TestStore) Compressor() *compression.Config {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
@@ -142,7 +141,7 @@ func (s *TestStore) Compressor() *compression.Compressor {
}
}
-func (s *TestStore) SetReportErrorFunc(f func(context.Context, string, error)) {
+func (s *TestStore) SetReportErrorFunc(f func(string, error)) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
@@ -230,14 +229,3 @@ func (s *TestStore) Iterate(ctx context.Context, req common.IteratePrm) (common.
}
func (s *TestStore) SetParentID(string) {}
-
-func (s *TestStore) Rebuild(_ context.Context, _ common.RebuildPrm) (common.RebuildRes, error) {
- return common.RebuildRes{}, nil
-}
-
-func (s *TestStore) ObjectsCount(ctx context.Context) (uint64, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
-
- return s.st.ObjectsCount(ctx)
-}
diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go
index e0617a832..e45f502ac 100644
--- a/pkg/local_object_storage/engine/container.go
+++ b/pkg/local_object_storage/engine/container.go
@@ -44,25 +44,22 @@ func (r ListContainersRes) Containers() []cid.ID {
// ContainerSize returns the sum of estimation container sizes among all shards.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) {
- defer elapsed("ContainerSize", e.metrics.AddMethodDuration)()
-
+func (e *StorageEngine) ContainerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) {
err = e.execIfNotBlocked(func() error {
- var csErr error
- res, csErr = e.containerSize(ctx, prm)
- return csErr
+ res, err = e.containerSize(prm)
+ return err
})
return
}
// ContainerSize calls ContainerSize method on engine to calculate sum of estimation container sizes among all shards.
-func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, error) {
+func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) {
var prm ContainerSizePrm
prm.SetContainerID(id)
- res, err := e.ContainerSize(ctx, prm)
+ res, err := e.ContainerSize(prm)
if err != nil {
return 0, err
}
@@ -70,15 +67,18 @@ func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, er
return res.Size(), nil
}
-func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
- var res ContainerSizeRes
- err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) {
+ if e.metrics != nil {
+ defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)()
+ }
+
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
var csPrm shard.ContainerSizePrm
csPrm.SetContainerID(prm.cnr)
- csRes, err := sh.ContainerSize(ctx, csPrm)
+ csRes, err := sh.Shard.ContainerSize(csPrm)
if err != nil {
- e.reportShardError(ctx, sh, "can't get container size", err,
+ e.reportShardError(sh, "can't get container size", err,
zap.Stringer("container_id", prm.cnr))
return false
}
@@ -88,19 +88,16 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm)
return false
})
- return res, err
+ return
}
// ListContainers returns a unique container IDs presented in the engine objects.
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) (res ListContainersRes, err error) {
- defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
-
err = e.execIfNotBlocked(func() error {
- var lcErr error
- res, lcErr = e.listContainers(ctx)
- return lcErr
+ res, err = e.listContainers(ctx)
+ return err
})
return
@@ -119,12 +116,16 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) {
}
func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) {
+ if e.metrics != nil {
+ defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
+ }
+
uniqueIDs := make(map[string]cid.ID)
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
- res, err := sh.ListContainers(ctx, shard.ListContainersPrm{})
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+ res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{})
if err != nil {
- e.reportShardError(ctx, sh, "can't get list of containers", err)
+ e.reportShardError(sh, "can't get list of containers", err)
return false
}
@@ -136,9 +137,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes,
}
return false
- }); err != nil {
- return ListContainersRes{}, err
- }
+ })
result := make([]cid.ID, 0, len(uniqueIDs))
for _, v := range uniqueIDs {
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index bf1649f6e..d97148f73 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -22,11 +22,15 @@ type shardInitError struct {
// Open opens all StorageEngine's components.
func (e *StorageEngine) Open(ctx context.Context) error {
+ return e.open(ctx)
+}
+
+func (e *StorageEngine) open(ctx context.Context) error {
e.mtx.Lock()
defer e.mtx.Unlock()
var wg sync.WaitGroup
- errCh := make(chan shardInitError, len(e.shards))
+ var errCh = make(chan shardInitError, len(e.shards))
for id, sh := range e.shards {
wg.Add(1)
@@ -45,16 +49,16 @@ func (e *StorageEngine) Open(ctx context.Context) error {
for res := range errCh {
if res.err != nil {
- e.log.Error(ctx, logs.EngineCouldNotOpenShardClosingAndSkipping,
+ e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))
sh := e.shards[res.id]
delete(e.shards, res.id)
- err := sh.Close(ctx)
+ err := sh.Close()
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
+ e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}
@@ -71,13 +75,15 @@ func (e *StorageEngine) Init(ctx context.Context) error {
e.mtx.Lock()
defer e.mtx.Unlock()
- errCh := make(chan shardInitError, len(e.shards))
+ var errCh = make(chan shardInitError, len(e.shards))
var eg errgroup.Group
- if e.lowMem && e.anyShardRequiresRefill() {
+ if e.cfg.lowMem && e.anyShardRequiresRefill() {
eg.SetLimit(1)
}
for id, sh := range e.shards {
+ id := id
+ sh := sh
eg.Go(func() error {
if err := sh.Init(ctx); err != nil {
errCh <- shardInitError{
@@ -91,29 +97,29 @@ func (e *StorageEngine) Init(ctx context.Context) error {
err := eg.Wait()
close(errCh)
if err != nil {
- return fmt.Errorf("initialize shards: %w", err)
+ return fmt.Errorf("failed to initialize shards: %w", err)
}
for res := range errCh {
if res.err != nil {
if errors.Is(res.err, blobstor.ErrInitBlobovniczas) {
- e.log.Error(ctx, logs.EngineCouldNotInitializeShardClosingAndSkipping,
+ e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))
sh := e.shards[res.id]
delete(e.shards, res.id)
- err := sh.Close(ctx)
+ err := sh.Close()
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
+ e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}
continue
}
- return fmt.Errorf("initialize shard %s: %w", res.id, res.err)
+ return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err)
}
}
@@ -122,7 +128,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
}
e.wg.Add(1)
- go e.setModeLoop(ctx)
+ go e.setModeLoop()
return nil
}
@@ -145,19 +151,25 @@ var errClosed = errors.New("storage engine is closed")
func (e *StorageEngine) Close(ctx context.Context) error {
close(e.closeCh)
defer e.wg.Wait()
- return e.closeEngine(ctx)
+ return e.setBlockExecErr(ctx, errClosed)
}
// closes all shards. Never returns an error, shard errors are logged.
-func (e *StorageEngine) closeAllShards(ctx context.Context) error {
+func (e *StorageEngine) close(releasePools bool) error {
e.mtx.RLock()
defer e.mtx.RUnlock()
+ if releasePools {
+ for _, p := range e.shardPools {
+ p.Release()
+ }
+ }
+
for id, sh := range e.shards {
- if err := sh.Close(ctx); err != nil {
- e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
+ if err := sh.Close(); err != nil {
+ e.log.Debug(logs.EngineCouldNotCloseShard,
zap.String("id", id),
- zap.Error(err),
+ zap.String("error", err.Error()),
)
}
}
@@ -172,29 +184,90 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error {
e.blockExec.mtx.RLock()
defer e.blockExec.mtx.RUnlock()
- if e.blockExec.closed {
- return errClosed
+ if e.blockExec.err != nil {
+ return e.blockExec.err
}
return op()
}
-func (e *StorageEngine) closeEngine(ctx context.Context) error {
+// sets the flag of blocking execution of all data operations according to err:
+// - err != nil, then blocks the execution. If exec wasn't blocked, calls close method
+// (if err == errClosed => additionally releases pools and does not allow to resume executions).
+// - otherwise, resumes execution. If exec was blocked, calls open method.
+//
+// Can be called concurrently with exec. In this case it waits for all executions to complete.
+func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error {
e.blockExec.mtx.Lock()
defer e.blockExec.mtx.Unlock()
- if e.blockExec.closed {
+ prevErr := e.blockExec.err
+
+ wasClosed := errors.Is(prevErr, errClosed)
+ if wasClosed {
return errClosed
}
- e.blockExec.closed = true
- return e.closeAllShards(ctx)
+ e.blockExec.err = err
+
+ if err == nil {
+ if prevErr != nil { // block -> ok
+ return e.open(ctx)
+ }
+ } else if prevErr == nil { // ok -> block
+ return e.close(errors.Is(err, errClosed))
+ }
+
+ // otherwise do nothing
+
+ return nil
+}
+
+// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err.
+// To resume the execution, use ResumeExecution method.
+//
+// Сan be called regardless of the fact of the previous blocking. If execution wasn't blocked, releases all resources
+// similar to Close. Can be called concurrently with Close and any data related method (waits for all executions
+// to complete). Returns error if any Close has been called before.
+//
+// Must not be called concurrently with either Open or Init.
+//
+// Note: technically passing nil error will resume the execution, otherwise, it is recommended to call ResumeExecution
+// for this.
+func (e *StorageEngine) BlockExecution(err error) error {
+ return e.setBlockExecErr(context.Background(), err)
+}
+
+// ResumeExecution resumes the execution of any data-related operation.
+// To block the execution, use BlockExecution method.
+//
+// Сan be called regardless of the fact of the previous blocking. If execution was blocked, prepares all resources
+// similar to Open. Can be called concurrently with Close and any data related method (waits for all executions
+// to complete). Returns error if any Close has been called before.
+//
+// Must not be called concurrently with either Open or Init.
+func (e *StorageEngine) ResumeExecution() error {
+ return e.setBlockExecErr(context.Background(), nil)
}
type ReConfiguration struct {
+ errorsThreshold uint32
+ shardPoolSize uint32
+
shards map[string][]shard.Option // meta path -> shard opts
}
+// SetErrorsThreshold sets a size amount of errors after which
+// shard is moved to read-only mode.
+func (rCfg *ReConfiguration) SetErrorsThreshold(errorsThreshold uint32) {
+ rCfg.errorsThreshold = errorsThreshold
+}
+
+// SetShardPoolSize sets a size of worker pool for each shard.
+func (rCfg *ReConfiguration) SetShardPoolSize(shardPoolSize uint32) {
+ rCfg.shardPoolSize = shardPoolSize
+}
+
// AddShard adds a shard for the reconfiguration.
// Shard identifier is calculated from paths used in blobstor.
func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) {
@@ -249,12 +322,12 @@ loop:
e.mtx.RUnlock()
- e.removeShards(ctx, shardsToRemove...)
+ e.removeShards(shardsToRemove...)
for _, p := range shardsToReload {
err := p.sh.Reload(ctx, p.opts...)
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotReloadAShard,
+ e.log.Error(logs.EngineCouldNotReloadAShard,
zap.Stringer("shard id", p.sh.ID()),
zap.Error(err))
}
@@ -263,7 +336,7 @@ loop:
for _, newID := range shardsToAdd {
sh, err := e.createShard(ctx, rcfg.shards[newID])
if err != nil {
- return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err)
+ return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err)
}
idStr := sh.ID().String()
@@ -273,17 +346,17 @@ loop:
err = sh.Init(ctx)
}
if err != nil {
- _ = sh.Close(ctx)
- return fmt.Errorf("init %s shard: %w", idStr, err)
+ _ = sh.Close()
+ return fmt.Errorf("could not init %s shard: %w", idStr, err)
}
err = e.addShard(sh)
if err != nil {
- _ = sh.Close(ctx)
- return fmt.Errorf("add %s shard: %w", idStr, err)
+ _ = sh.Close()
+ return fmt.Errorf("could not add %s shard: %w", idStr, err)
}
- e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))
+ e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr))
}
return nil
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index 4ff0ed5ec..0c5ff94da 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -2,6 +2,7 @@ package engine
import (
"context"
+ "errors"
"fmt"
"io/fs"
"os"
@@ -11,14 +12,18 @@ import (
"testing"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
)
@@ -39,13 +44,16 @@ func TestInitializationFailure(t *testing.T) {
storages, smallFileStorage, largeFileStorage := newTestStorages(t.TempDir(), 1<<20)
- wcOpts := []writecache.Option{
- writecache.WithPath(t.TempDir()),
+ wcOpts := writecacheconfig.Options{
+ Type: writecacheconfig.TypeBBolt,
+ BBoltOptions: []writecachebbolt.Option{
+ writecachebbolt.WithPath(t.TempDir()),
+ },
}
return []shard.Option{
shard.WithID(sid),
- shard.WithLogger(test.NewLogger(t)),
+ shard.WithLogger(test.NewLogger(t, true)),
shard.WithBlobStorOptions(
blobstor.WithStorages(storages)),
shard.WithMetaBaseOptions(
@@ -54,7 +62,7 @@ func TestInitializationFailure(t *testing.T) {
OpenFile: opts.openFileMetabase,
}),
meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
- meta.WithPermissions(0o700),
+ meta.WithPermissions(0700),
meta.WithEpochState(epochState{})),
shard.WithWriteCache(true),
shard.WithWriteCacheOptions(wcOpts),
@@ -70,7 +78,7 @@ func TestInitializationFailure(t *testing.T) {
openFileMetabase: os.OpenFile,
openFilePilorama: os.OpenFile,
})
- largeFileStorage.SetOption(teststore.WithOpen(func(primitiveMode mode.ComponentMode) error {
+ largeFileStorage.SetOption(teststore.WithOpen(func(ro bool) error {
return teststore.ErrDiskExploded
}))
beforeReload := func() {
@@ -114,39 +122,39 @@ func TestInitializationFailure(t *testing.T) {
})
}
-func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.Option, beforeReload func()) {
+func testEngineFailInitAndReload(t *testing.T, errOnAdd bool, opts []shard.Option, beforeReload func()) {
var configID string
e := New()
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
_, err := e.AddShard(context.Background(), opts...)
- require.NoError(t, err)
-
- e.mtx.RLock()
- var id string
- for id = range e.shards {
- break
- }
- configID = calculateShardID(e.shards[id].Shard.DumpInfo())
- e.mtx.RUnlock()
-
- err = e.Open(context.Background())
- require.NoError(t, err)
- if degradedMode {
- require.NoError(t, e.Init(context.Background()))
- require.Equal(t, mode.DegradedReadOnly, e.DumpInfo().Shards[0].Mode)
- return
+ if errOnAdd {
+ require.Error(t, err)
+ // This branch is only taken when we cannot update shard ID in the metabase.
+ // The id cannot be encountered during normal operation, but it is ok for tests:
+ // it is only compared for equality with other ids and we have 0 shards here.
+ configID = "id"
} else {
- require.Error(t, e.Init(context.Background()))
+ require.NoError(t, err)
e.mtx.RLock()
- shardCount := len(e.shards)
+ var id string
+ for id = range e.shards {
+ break
+ }
+ configID = calculateShardID(e.shards[id].Shard.DumpInfo())
e.mtx.RUnlock()
- require.Equal(t, 0, shardCount)
+
+ err = e.Open(context.Background())
+ if err == nil {
+ require.Error(t, e.Init(context.Background()))
+ }
}
+ e.mtx.RLock()
+ shardCount := len(e.shards)
+ e.mtx.RUnlock()
+ require.Equal(t, 0, shardCount)
+
beforeReload()
require.NoError(t, e.Reload(context.Background(), ReConfiguration{
@@ -154,11 +162,47 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O
}))
e.mtx.RLock()
- shardCount := len(e.shards)
+ shardCount = len(e.shards)
e.mtx.RUnlock()
require.Equal(t, 1, shardCount)
}
+func TestExecBlocks(t *testing.T) {
+ e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many
+
+ // put some object
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+
+ addr := object.AddressOf(obj)
+
+ require.NoError(t, Put(context.Background(), e, obj))
+
+ // block executions
+ errBlock := errors.New("block exec err")
+
+ require.NoError(t, e.BlockExecution(errBlock))
+
+ // try to exec some op
+ _, err := Head(context.Background(), e, addr)
+ require.ErrorIs(t, err, errBlock)
+
+ // resume executions
+ require.NoError(t, e.ResumeExecution())
+
+ _, err = Head(context.Background(), e, addr) // can be any data-related op
+ require.NoError(t, err)
+
+ // close
+ require.NoError(t, e.Close(context.Background()))
+
+ // try exec after close
+ _, err = Head(context.Background(), e, addr)
+ require.Error(t, err)
+
+ // try to resume
+ require.Error(t, e.ResumeExecution())
+}
+
func TestPersistentShardID(t *testing.T) {
dir := t.TempDir()
@@ -168,7 +212,7 @@ func TestPersistentShardID(t *testing.T) {
require.NoError(t, te.ng.Close(context.Background()))
newTe := newEngineWithErrorThreshold(t, dir, 1)
- for i := range len(newTe.shards) {
+ for i := 0; i < len(newTe.shards); i++ {
require.Equal(t, te.shards[i].id, newTe.shards[i].id)
}
require.NoError(t, newTe.ng.Close(context.Background()))
@@ -184,6 +228,7 @@ func TestPersistentShardID(t *testing.T) {
require.Equal(t, te.shards[1].id, newTe.shards[0].id)
require.Equal(t, te.shards[0].id, newTe.shards[1].id)
require.NoError(t, newTe.ng.Close(context.Background()))
+
}
func TestReload(t *testing.T) {
@@ -205,6 +250,7 @@ func TestReload(t *testing.T) {
// no new paths => no new shards
require.Equal(t, shardNum, len(e.shards))
+ require.Equal(t, shardNum, len(e.shardPools))
newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum))
@@ -216,8 +262,7 @@ func TestReload(t *testing.T) {
require.NoError(t, e.Reload(context.Background(), rcfg))
require.Equal(t, shardNum+1, len(e.shards))
-
- require.NoError(t, e.Close(context.Background()))
+ require.Equal(t, shardNum+1, len(e.shardPools))
})
t.Run("remove shards", func(t *testing.T) {
@@ -227,7 +272,7 @@ func TestReload(t *testing.T) {
e, currShards := engineWithShards(t, removePath, shardNum)
var rcfg ReConfiguration
- for i := range len(currShards) - 1 { // without one of the shards
+ for i := 0; i < len(currShards)-1; i++ { // without one of the shards
rcfg.AddShard(currShards[i], nil)
}
@@ -235,8 +280,7 @@ func TestReload(t *testing.T) {
// removed one
require.Equal(t, shardNum-1, len(e.shards))
-
- require.NoError(t, e.Close(context.Background()))
+ require.Equal(t, shardNum-1, len(e.shardPools))
})
}
@@ -250,17 +294,16 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
te := testNewEngine(t).
setShardsNumOpts(t, num, func(id int) []shard.Option {
return []shard.Option{
- shard.WithLogger(test.NewLogger(t)),
+ shard.WithLogger(test.NewLogger(t, true)),
shard.WithBlobStorOptions(
- blobstor.WithStorages(newStorages(t, filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
+ blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
shard.WithMetaBaseOptions(
meta.WithPath(filepath.Join(addPath, fmt.Sprintf("%d.metabase", id))),
- meta.WithPermissions(0o700),
+ meta.WithPermissions(0700),
meta.WithEpochState(epochState{}),
),
}
- }).
- prepare(t)
+ })
e, ids := te.engine, te.shardIDs
for _, id := range ids {
@@ -268,6 +311,10 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
}
require.Equal(t, num, len(e.shards))
+ require.Equal(t, num, len(e.shardPools))
+
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
return e, currShards
}
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 223cdbc48..68a7325c6 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -23,6 +24,9 @@ type DeletePrm struct {
forceRemoval bool
}
+// DeleteRes groups the resulting values of Delete operation.
+type DeleteRes struct{}
+
// WithAddress is a Delete option to set the addresses of the objects to delete.
//
// Option is required.
@@ -47,33 +51,38 @@ func (p *DeletePrm) WithForceRemoval() {
// NOTE: Marks any object to be deleted (despite any prohibitions
// on operations with that object) if WithForceRemoval option has
// been provided.
-func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) error {
+func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
attribute.Bool("force_removal", prm.forceRemoval),
))
defer span.End()
- defer elapsed("Delete", e.metrics.AddMethodDuration)()
- return e.execIfNotBlocked(func() error {
- return e.delete(ctx, prm)
+ err = e.execIfNotBlocked(func() error {
+ res, err = e.delete(ctx, prm)
+ return err
})
+
+ return
}
-func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
+func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
+ if e.metrics != nil {
+ defer elapsed("Delete", e.metrics.AddMethodDuration)()
+ }
+
var locked struct {
is bool
}
var splitInfo *objectSDK.SplitInfo
- var ecInfo *objectSDK.ECInfo
// Removal of a big object is done in multiple stages:
// 1. Remove the parent object. If it is locked or already removed, return immediately.
// 2. Otherwise, search for all objects with a particular SplitID and delete them too.
- if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
+ e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
var existsPrm shard.ExistsPrm
- existsPrm.Address = prm.addr
+ existsPrm.SetAddress(prm.addr)
resExists, err := sh.Exists(ctx, existsPrm)
if err != nil {
@@ -82,18 +91,13 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
}
var splitErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
- if errors.As(err, &splitErr) {
- splitInfo = splitErr.SplitInfo()
- } else if errors.As(err, &ecErr) {
- e.deleteChunks(ctx, sh, ecInfo, prm)
- return false
- } else {
+ if !errors.As(err, &splitErr) {
if !client.IsErrObjectNotFound(err) {
- e.reportShardError(ctx, sh, "could not check object existence", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(sh, "could not check object existence", err)
}
return false
}
+ splitInfo = splitErr.SplitInfo()
} else if !resExists.Exists() {
return false
}
@@ -106,7 +110,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
_, err = sh.Inhume(ctx, shPrm)
if err != nil {
- e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(sh, "could not inhume object in shard", err)
var target *apistatus.ObjectLocked
locked.is = errors.As(err, &target)
@@ -116,40 +120,39 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
// If a parent object is removed we should set GC mark on each shard.
return splitInfo == nil
- }); err != nil {
- return err
- }
+ })
if locked.is {
- return new(apistatus.ObjectLocked)
+ return DeleteRes{}, new(apistatus.ObjectLocked)
}
if splitInfo != nil {
- return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
+ e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
}
- return nil
+ return DeleteRes{}, nil
}
-func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error {
+func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) {
var fs objectSDK.SearchFilters
fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID)
var selectPrm shard.SelectPrm
selectPrm.SetFilters(fs)
- selectPrm.SetContainerID(addr.Container(), false) // doesn't matter for search by splitID
+ selectPrm.SetContainerID(addr.Container())
var inhumePrm shard.InhumePrm
if force {
inhumePrm.ForceRemoval()
}
- return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
+ e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Select(ctx, selectPrm)
if err != nil {
- e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
+ e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren,
zap.Stringer("addr", addr),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return false
}
@@ -158,39 +161,13 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
_, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
- e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
+ e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
- zap.Error(err))
+ zap.String("err", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
continue
}
}
return false
})
}
-
-func (e *StorageEngine) deleteChunks(
- ctx context.Context, sh hashedShard, ecInfo *objectSDK.ECInfo, prm DeletePrm,
-) {
- var inhumePrm shard.InhumePrm
- if prm.forceRemoval {
- inhumePrm.ForceRemoval()
- }
- for _, chunk := range ecInfo.Chunks {
- var addr oid.Address
- addr.SetContainer(prm.addr.Container())
- var objID oid.ID
- err := objID.ReadFromV2(chunk.ID)
- if err != nil {
- e.reportShardError(ctx, sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr))
- }
- addr.SetObject(objID)
- inhumePrm.MarkAsGarbage(addr)
- _, err = sh.Inhume(ctx, inhumePrm)
- if err != nil {
- e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
- zap.Stringer("addr", addr),
- zap.Error(err))
- continue
- }
- }
-}
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index a56598c09..8e94732ae 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -6,7 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -48,13 +48,18 @@ func TestDeleteBigObject(t *testing.T) {
link.SetSplitID(splitID)
link.SetChildren(childIDs...)
- e := testNewEngine(t).setShardsNum(t, 3).prepare(t).engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ s1 := testNewShard(t, 1)
+ s2 := testNewShard(t, 2)
+ s3 := testNewShard(t, 3)
+
+ e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine
+ e.log = test.NewLogger(t, true)
+ defer e.Close(context.Background())
for i := range children {
- require.NoError(t, Put(context.Background(), e, children[i], false))
+ require.NoError(t, Put(context.Background(), e, children[i]))
}
- require.NoError(t, Put(context.Background(), e, link, false))
+ require.NoError(t, Put(context.Background(), e, link))
addrParent := object.AddressOf(parent)
checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
@@ -70,100 +75,9 @@ func TestDeleteBigObject(t *testing.T) {
deletePrm.WithForceRemoval()
deletePrm.WithAddress(addrParent)
- require.NoError(t, e.Delete(context.Background(), deletePrm))
-
- checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
- checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
- for i := range children {
- checkGetError[*apistatus.ObjectNotFound](t, e, object.AddressOf(children[i]), true)
- }
-}
-
-func TestDeleteBigObjectWithoutGC(t *testing.T) {
- t.Parallel()
-
- cnr := cidtest.ID()
- parentID := oidtest.ID()
- splitID := objectSDK.NewSplitID()
-
- parent := testutil.GenerateObjectWithCID(cnr)
- parent.SetID(parentID)
- parent.SetPayload(nil)
-
- const childCount = 3
- children := make([]*objectSDK.Object, childCount)
- childIDs := make([]oid.ID, childCount)
- for i := range children {
- children[i] = testutil.GenerateObjectWithCID(cnr)
- if i != 0 {
- children[i].SetPreviousID(childIDs[i-1])
- }
- if i == len(children)-1 {
- children[i].SetParent(parent)
- }
- children[i].SetSplitID(splitID)
- children[i].SetPayload([]byte{byte(i), byte(i + 1), byte(i + 2)})
- childIDs[i], _ = children[i].ID()
- }
-
- link := testutil.GenerateObjectWithCID(cnr)
- link.SetParent(parent)
- link.SetParentID(parentID)
- link.SetSplitID(splitID)
- link.SetChildren(childIDs...)
-
- te := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
- return []shard.Option{shard.WithDisabledGC()}
- }).prepare(t)
- e := te.engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
-
- s1 := te.shards[0]
-
- for i := range children {
- require.NoError(t, Put(context.Background(), e, children[i], false))
- }
- require.NoError(t, Put(context.Background(), e, link, false))
-
- addrParent := object.AddressOf(parent)
- checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
-
- addrLink := object.AddressOf(link)
- checkGetError[error](t, e, addrLink, false)
-
- for i := range children {
- checkGetError[error](t, e, object.AddressOf(children[i]), false)
- }
-
- // delete logical
- var deletePrm DeletePrm
- deletePrm.WithForceRemoval()
- deletePrm.WithAddress(addrParent)
-
- require.NoError(t, e.Delete(context.Background(), deletePrm))
-
- checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
- checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
- for i := range children {
- checkGetError[*apistatus.ObjectNotFound](t, e, object.AddressOf(children[i]), true)
- }
-
- // delete physical
- var delPrm shard.DeletePrm
- delPrm.SetAddresses(addrParent)
- _, err := s1.Delete(context.Background(), delPrm)
+ _, err := e.Delete(context.Background(), deletePrm)
require.NoError(t, err)
- delPrm.SetAddresses(addrLink)
- _, err = s1.Delete(context.Background(), delPrm)
- require.NoError(t, err)
-
- for i := range children {
- delPrm.SetAddresses(object.AddressOf(children[i]))
- _, err = s1.Delete(context.Background(), delPrm)
- require.NoError(t, err)
- }
-
checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
for i := range children {
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 376d545d3..bb0b682d6 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -7,14 +7,12 @@ import (
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
@@ -28,13 +26,16 @@ type StorageEngine struct {
shards map[string]hashedShard
+ shardPools map[string]util.WorkerPool
+
closeCh chan struct{}
setModeCh chan setModeRequest
wg sync.WaitGroup
blockExec struct {
- mtx sync.RWMutex
- closed bool
+ mtx sync.RWMutex
+
+ err error
}
evacuateLimiter *evacuationLimiter
}
@@ -52,7 +53,7 @@ type setModeRequest struct {
// setModeLoop listens setModeCh to perform degraded mode transition of a single shard.
// Instead of creating a worker per single shard we use a single goroutine.
-func (e *StorageEngine) setModeLoop(ctx context.Context) {
+func (e *StorageEngine) setModeLoop() {
defer e.wg.Done()
var (
@@ -72,7 +73,7 @@ func (e *StorageEngine) setModeLoop(ctx context.Context) {
if !ok {
inProgress[sid] = struct{}{}
go func() {
- e.moveToDegraded(ctx, r.sh, r.errorCount, r.isMeta)
+ e.moveToDegraded(r.sh, r.errorCount, r.isMeta)
mtx.Lock()
delete(inProgress, sid)
@@ -84,7 +85,7 @@ func (e *StorageEngine) setModeLoop(ctx context.Context) {
}
}
-func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, errCount uint32, isMeta bool) {
+func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta bool) {
sid := sh.ID()
log := e.log.With(
zap.Stringer("shard_id", sid),
@@ -94,26 +95,28 @@ func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, err
defer e.mtx.RUnlock()
if isMeta {
- err := sh.SetMode(ctx, mode.DegradedReadOnly)
+ err := sh.SetMode(mode.DegradedReadOnly)
if err == nil {
- log.Info(ctx, logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
+ log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
return
}
- log.Error(ctx, logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
+ log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
zap.Error(err))
}
- err := sh.SetMode(ctx, mode.ReadOnly)
+ err := sh.SetMode(mode.ReadOnly)
if err != nil {
- log.Error(ctx, logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
+ log.Error(logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
return
}
- log.Info(ctx, logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
+ log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
}
-// reportShardErrorByID increases shard error counter and logs an error.
-func (e *StorageEngine) reportShardErrorByID(ctx context.Context, id string, msg string, err error) {
+// reportShardErrorBackground increases shard error counter and logs an error.
+// It is intended to be used from background workers and
+// doesn't change shard mode because of possible deadlocks.
+func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err error) {
e.mtx.RLock()
sh, ok := e.shards[id]
e.mtx.RUnlock()
@@ -122,61 +125,79 @@ func (e *StorageEngine) reportShardErrorByID(ctx context.Context, id string, msg
return
}
- e.reportShardError(ctx, sh, msg, err)
+ if isLogical(err) {
+ e.log.Warn(msg,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.String("error", err.Error()))
+ return
+ }
+
+ errCount := sh.errorCount.Add(1)
+ sh.Shard.IncErrorCounter()
+ e.reportShardErrorWithFlags(sh.Shard, errCount, false, msg, err)
}
// reportShardError checks that the amount of errors doesn't exceed the configured threshold.
// If it does, shard is set to read-only mode.
func (e *StorageEngine) reportShardError(
- ctx context.Context,
sh hashedShard,
msg string,
err error,
- fields ...zap.Field,
-) {
+ fields ...zap.Field) {
if isLogical(err) {
- e.log.Warn(ctx, msg,
+ e.log.Warn(msg,
zap.Stringer("shard_id", sh.ID()),
- zap.Error(err))
+ zap.String("error", err.Error()))
return
}
errCount := sh.errorCount.Add(1)
- e.metrics.IncErrorCounter(sh.ID().String())
+ sh.Shard.IncErrorCounter()
+ e.reportShardErrorWithFlags(sh.Shard, errCount, true, msg, err, fields...)
+}
+func (e *StorageEngine) reportShardErrorWithFlags(
+ sh *shard.Shard,
+ errCount uint32,
+ block bool,
+ msg string,
+ err error,
+ fields ...zap.Field) {
sid := sh.ID()
- e.log.Warn(ctx, msg, append([]zap.Field{
+ e.log.Warn(msg, append([]zap.Field{
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount),
- zap.Error(err),
+ zap.String("error", err.Error()),
}, fields...)...)
if e.errorsThreshold == 0 || errCount < e.errorsThreshold {
return
}
- req := setModeRequest{
- errorCount: errCount,
- sh: sh.Shard,
- isMeta: errors.As(err, new(metaerr.Error)),
- }
+ isMeta := errors.As(err, new(metaerr.Error))
+ if block {
+ e.moveToDegraded(sh, errCount, isMeta)
+ } else {
+ req := setModeRequest{
+ errorCount: errCount,
+ isMeta: isMeta,
+ sh: sh,
+ }
- select {
- case e.setModeCh <- req:
- default:
- // For background workers we can have a lot of such errors,
- // thus logging is done with DEBUG level.
- e.log.Debug(ctx, logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
- zap.Stringer("shard_id", sid),
- zap.Uint32("error_count", errCount))
+ select {
+ case e.setModeCh <- req:
+ default:
+ // For background workers we can have a lot of such errors,
+ // thus logging is done with DEBUG level.
+ e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
+ zap.Stringer("shard_id", sid),
+ zap.Uint32("error_count", errCount))
+ }
}
}
func isLogical(err error) bool {
- return errors.As(err, &logicerr.Logical{}) ||
- errors.Is(err, context.Canceled) ||
- errors.Is(err, context.DeadlineExceeded) ||
- errors.As(err, new(*apistatus.ResourceExhausted))
+ return errors.As(err, &logicerr.Logical{}) || errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
}
// Option represents StorageEngine's constructor option.
@@ -189,18 +210,17 @@ type cfg struct {
metrics MetricRegister
- lowMem bool
+ shardPoolSize uint32
- containerSource atomic.Pointer[containerSource]
+ lowMem bool
}
func defaultCfg() *cfg {
- res := &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
- metrics: noopMetrics{},
+ return &cfg{
+ log: &logger.Logger{Logger: zap.L()},
+
+ shardPoolSize: 20,
}
- res.containerSource.Store(&containerSource{})
- return res
}
// New creates, initializes and returns new StorageEngine instance.
@@ -211,18 +231,13 @@ func New(opts ...Option) *StorageEngine {
opts[i](c)
}
- evLimMtx := &sync.RWMutex{}
- evLimCond := sync.NewCond(evLimMtx)
-
return &StorageEngine{
- cfg: c,
- shards: make(map[string]hashedShard),
- closeCh: make(chan struct{}),
- setModeCh: make(chan setModeRequest),
- evacuateLimiter: &evacuationLimiter{
- guard: evLimMtx,
- statusCond: evLimCond,
- },
+ cfg: c,
+ shards: make(map[string]hashedShard),
+ shardPools: make(map[string]util.WorkerPool),
+ closeCh: make(chan struct{}),
+ setModeCh: make(chan setModeRequest),
+ evacuateLimiter: &evacuationLimiter{},
}
}
@@ -239,6 +254,13 @@ func WithMetrics(v MetricRegister) Option {
}
}
+// WithShardPoolSize returns option to specify size of worker pool for each shard.
+func WithShardPoolSize(sz uint32) Option {
+ return func(c *cfg) {
+ c.shardPoolSize = sz
+ }
+}
+
// WithErrorThreshold returns an option to specify size amount of errors after which
// shard is moved to read-only mode.
func WithErrorThreshold(sz uint32) Option {
@@ -253,30 +275,3 @@ func WithLowMemoryConsumption(lowMemCons bool) Option {
c.lowMem = lowMemCons
}
}
-
-// SetContainerSource sets container source.
-func (e *StorageEngine) SetContainerSource(cs container.Source) {
- e.containerSource.Store(&containerSource{cs: cs})
-}
-
-type containerSource struct {
- cs container.Source
-}
-
-func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) (bool, error) {
- select {
- case <-ctx.Done():
- return false, ctx.Err()
- default:
- }
-
- if s == nil || s.cs == nil {
- return true, nil
- }
-
- wasRemoved, err := container.WasRemoved(ctx, s.cs, id)
- if err != nil {
- return false, err
- }
- return !wasRemoved, nil
-}
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index fc6d9ee9c..934bea8bb 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -2,122 +2,149 @@ package engine
import (
"context"
- "fmt"
+ "os"
"path/filepath"
- "runtime/debug"
- "strings"
- "sync"
+ "sync/atomic"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "git.frostfs.info/TrueCloudLab/hrw"
+ "github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
)
-type epochState struct {
- currEpoch uint64
-}
+type epochState struct{}
func (s epochState) CurrentEpoch() uint64 {
- return s.currEpoch
+ return 0
+}
+
+func BenchmarkExists(b *testing.B) {
+ b.Run("2 shards", func(b *testing.B) {
+ benchmarkExists(b, 2)
+ })
+ b.Run("4 shards", func(b *testing.B) {
+ benchmarkExists(b, 4)
+ })
+ b.Run("8 shards", func(b *testing.B) {
+ benchmarkExists(b, 8)
+ })
+}
+
+func benchmarkExists(b *testing.B, shardNum int) {
+ shards := make([]*shard.Shard, shardNum)
+ for i := 0; i < shardNum; i++ {
+ shards[i] = testNewShard(b, i)
+ }
+
+ e := testNewEngine(b).setInitializedShards(b, shards...).engine
+ b.Cleanup(func() {
+ _ = e.Close(context.Background())
+ _ = os.RemoveAll(b.Name())
+ })
+
+ addr := oidtest.Address()
+ for i := 0; i < 100; i++ {
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ err := Put(context.Background(), e, obj)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ok, err := e.exists(context.Background(), addr)
+ if err != nil || ok {
+ b.Fatalf("%t %v", ok, err)
+ }
+ }
}
type testEngineWrapper struct {
engine *StorageEngine
- shards []*shard.Shard
shardIDs []*shard.ID
}
func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
- opts = append(testGetDefaultEngineOptions(t), opts...)
- return &testEngineWrapper{engine: New(opts...)}
+ engine := New(WithLogger(test.NewLogger(t, true)))
+ for _, opt := range opts {
+ opt(engine.cfg)
+ }
+ return &testEngineWrapper{
+ engine: engine,
+ }
+}
+
+func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard.Shard) *testEngineWrapper {
+ for _, s := range shards {
+ pool, err := ants.NewPool(10, ants.WithNonblocking(true))
+ require.NoError(t, err)
+
+ te.engine.shards[s.ID().String()] = hashedShard{
+ shardWrapper: shardWrapper{
+ errorCount: new(atomic.Uint32),
+ Shard: s,
+ },
+ hash: hrw.StringHash(s.ID().String()),
+ }
+ te.engine.shardPools[s.ID().String()] = pool
+ te.shardIDs = append(te.shardIDs, s.ID())
+ }
+ return te
}
func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
- return te.setShardsNumOpts(t, num, func(_ int) []shard.Option {
- return testGetDefaultShardOptions(t)
- })
+ shards := make([]*shard.Shard, 0, num)
+
+ for i := 0; i < num; i++ {
+ shards = append(shards, testNewShard(t, i))
+ }
+
+ return te.setInitializedShards(t, shards...)
}
-func (te *testEngineWrapper) setShardsNumOpts(
- t testing.TB, num int, shardOpts func(id int) []shard.Option,
-) *testEngineWrapper {
- te.shards = make([]*shard.Shard, num)
- te.shardIDs = make([]*shard.ID, num)
- for i := range num {
- shard, err := te.engine.createShard(context.Background(), shardOpts(i))
+func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
+ for i := 0; i < num; i++ {
+ opts := shardOpts(i)
+ id, err := te.engine.AddShard(context.Background(), opts...)
require.NoError(t, err)
- require.NoError(t, te.engine.addShard(shard))
- te.shards[i] = shard
- te.shardIDs[i] = shard.ID()
+ te.shardIDs = append(te.shardIDs, id)
}
- require.Len(t, te.engine.shards, num)
return te
}
-func (te *testEngineWrapper) setShardsNumAdditionalOpts(
- t testing.TB, num int, shardOpts func(id int) []shard.Option,
-) *testEngineWrapper {
- return te.setShardsNumOpts(t, num, func(id int) []shard.Option {
- return append(testGetDefaultShardOptions(t), shardOpts(id)...)
- })
-}
-
-// prepare calls Open and Init on the created engine.
-func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper {
- require.NoError(t, te.engine.Open(context.Background()))
- require.NoError(t, te.engine.Init(context.Background()))
+func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
+ for i := 0; i < num; i++ {
+ defaultOpts := testDefaultShardOptions(t, i)
+ opts := append(defaultOpts, shardOpts(i)...)
+ id, err := te.engine.AddShard(context.Background(), opts...)
+ require.NoError(t, err)
+ te.shardIDs = append(te.shardIDs, id)
+ }
return te
}
-func testGetDefaultEngineOptions(t testing.TB) []Option {
- return []Option{
- WithLogger(test.NewLogger(t)),
- }
-}
-
-func testGetDefaultShardOptions(t testing.TB) []shard.Option {
- return []shard.Option{
- shard.WithLogger(test.NewLogger(t)),
- shard.WithBlobStorOptions(
- blobstor.WithStorages(
- newStorages(t, t.TempDir(), 1<<20)),
- blobstor.WithLogger(test.NewLogger(t)),
- ),
- shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
- shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...),
- shard.WithLimiter(&testQoSLimiter{t: t}),
- }
-}
-
-func testGetDefaultMetabaseOptions(t testing.TB) []meta.Option {
- return []meta.Option{
- meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
- meta.WithPermissions(0o700),
- meta.WithEpochState(epochState{}),
- meta.WithLogger(test.NewLogger(t)),
- }
-}
-
-func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage {
+func newStorages(root string, smallSize uint64) []blobstor.SubStorage {
return []blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
- context.Background(),
blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1),
- blobovniczatree.WithPermissions(0o700),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))),
+ blobovniczatree.WithPermissions(0700)),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < smallSize
},
@@ -125,8 +152,7 @@ func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStor
{
Storage: fstree.New(
fstree.WithPath(root),
- fstree.WithDepth(1),
- fstree.WithLogger(test.NewLogger(t))),
+ fstree.WithDepth(1)),
},
}
}
@@ -134,11 +160,10 @@ func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStor
func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *teststore.TestStore, *teststore.TestStore) {
smallFileStorage := teststore.New(
teststore.WithSubstorage(blobovniczatree.NewBlobovniczaTree(
- context.Background(),
blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1),
- blobovniczatree.WithPermissions(0o700)),
+ blobovniczatree.WithPermissions(0700)),
))
largeFileStorage := teststore.New(
teststore.WithSubstorage(fstree.New(
@@ -158,77 +183,29 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
}, smallFileStorage, largeFileStorage
}
-var _ qos.Limiter = (*testQoSLimiter)(nil)
+func testNewShard(t testing.TB, id int) *shard.Shard {
+ sid, err := generateShardID()
+ require.NoError(t, err)
-type testQoSLimiter struct {
- t testing.TB
- quard sync.Mutex
- id int64
- readStacks map[int64][]byte
- writeStacks map[int64][]byte
+ shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t, id)...)
+ s := shard.New(shardOpts...)
+
+ require.NoError(t, s.Open(context.Background()))
+ require.NoError(t, s.Init(context.Background()))
+
+ return s
}
-func (t *testQoSLimiter) SetMetrics(qos.Metrics) {}
-
-func (t *testQoSLimiter) Close() {
- t.quard.Lock()
- defer t.quard.Unlock()
-
- var sb strings.Builder
- var seqN int
- for _, stack := range t.readStacks {
- seqN++
- sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack)))
- }
- for _, stack := range t.writeStacks {
- seqN++
- sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack)))
- }
- require.True(t.t, seqN == 0, sb.String())
+func testDefaultShardOptions(t testing.TB, id int) []shard.Option {
+ return []shard.Option{
+ shard.WithLogger(test.NewLogger(t, true)),
+ shard.WithBlobStorOptions(
+ blobstor.WithStorages(
+ newStorages(t.TempDir(), 1<<20))),
+ shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
+ shard.WithMetaBaseOptions(
+ meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
+ meta.WithPermissions(0700),
+ meta.WithEpochState(epochState{}),
+ )}
}
-
-func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) {
- t.quard.Lock()
- defer t.quard.Unlock()
-
- stack := debug.Stack()
-
- t.id++
- id := t.id
-
- if t.readStacks == nil {
- t.readStacks = make(map[int64][]byte)
- }
- t.readStacks[id] = stack
-
- return func() {
- t.quard.Lock()
- defer t.quard.Unlock()
-
- delete(t.readStacks, id)
- }, nil
-}
-
-func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) {
- t.quard.Lock()
- defer t.quard.Unlock()
-
- stack := debug.Stack()
-
- t.id++
- id := t.id
-
- if t.writeStacks == nil {
- t.writeStacks = make(map[int64][]byte)
- }
- t.writeStacks[id] = stack
-
- return func() {
- t.quard.Lock()
- defer t.quard.Unlock()
-
- delete(t.writeStacks, id)
- }, nil
-}
-
-func (t *testQoSLimiter) SetParentID(string) {}
diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go
index 57029dd5f..90356104e 100644
--- a/pkg/local_object_storage/engine/error_test.go
+++ b/pkg/local_object_storage/engine/error_test.go
@@ -7,7 +7,6 @@ import (
"path/filepath"
"strconv"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -46,6 +45,7 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
var testShards [2]*testShard
te := testNewEngine(t,
+ WithShardPoolSize(1),
WithErrorThreshold(errThreshold),
).
setShardsNumOpts(t, 2, func(id int) []shard.Option {
@@ -55,19 +55,21 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
largeFileStorage: largeFileStorage,
}
return []shard.Option{
- shard.WithLogger(test.NewLogger(t)),
+ shard.WithLogger(test.NewLogger(t, true)),
shard.WithBlobStorOptions(blobstor.WithStorages(storages)),
shard.WithMetaBaseOptions(
meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),
- meta.WithPermissions(0o700),
+ meta.WithPermissions(0700),
meta.WithEpochState(epochState{}),
),
shard.WithPiloramaOptions(
pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
- pilorama.WithPerm(0o700)),
+ pilorama.WithPerm(0700)),
}
- }).prepare(t)
+ })
e := te.engine
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
for i, id := range te.shardIDs {
testShards[i].id = id
@@ -112,7 +114,6 @@ func TestErrorReporting(t *testing.T) {
checkShardState(t, te.ng, te.shards[0].id, i, mode.ReadWrite)
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
- require.NoError(t, te.ng.Close(context.Background()))
})
t.Run("with error threshold", func(t *testing.T) {
const errThreshold = 3
@@ -148,19 +149,18 @@ func TestErrorReporting(t *testing.T) {
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
- for i := range uint32(2) {
+ for i := uint32(0); i < 2; i++ {
_, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.Error(t, err)
checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.ReadOnly)
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
- require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, false))
+ require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, false))
checkShardState(t, te.ng, te.shards[0].id, errThreshold+1, mode.ReadWrite)
- require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, true))
+ require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, true))
checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
- require.NoError(t, te.ng.Close(context.Background()))
})
}
@@ -219,7 +219,6 @@ func TestBlobstorFailback(t *testing.T) {
checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
- require.NoError(t, te.ng.Close(context.Background()))
}
func checkShardState(t *testing.T, e *StorageEngine, id *shard.ID, errCount uint32, mode mode.Mode) {
@@ -227,8 +226,6 @@ func checkShardState(t *testing.T, e *StorageEngine, id *shard.ID, errCount uint
sh := e.shards[id.String()]
e.mtx.RUnlock()
- require.Eventually(t, func() bool {
- return errCount == sh.errorCount.Load() &&
- mode == sh.GetMode()
- }, 10*time.Second, 10*time.Millisecond, "shard mode doesn't changed to expected state in 10 seconds")
+ require.Equal(t, errCount, sh.errorCount.Load())
+ require.Equal(t, mode, sh.GetMode())
}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index c08dfbf03..192070c1c 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -4,37 +4,22 @@ import (
"context"
"errors"
"fmt"
- "slices"
- "strings"
- "sync"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
- "golang.org/x/sync/errgroup"
-)
-
-const (
- // containerWorkerCountDefault is a default value of the count of
- // concurrent container evacuation workers.
- containerWorkerCountDefault = 10
- // objectWorkerCountDefault is a default value of the count of
- // concurrent object evacuation workers.
- objectWorkerCountDefault = 10
)
var (
@@ -43,135 +28,73 @@ var (
evacuationOperationLogField = zap.String("operation", "evacuation")
)
-// EvacuateScope is an evacuation scope. Keep in sync with pkg/services/control/service.proto.
-type EvacuateScope uint32
-
-var (
- EvacuateScopeObjects EvacuateScope = 1
- EvacuateScopeTrees EvacuateScope = 2
-)
-
-func (s EvacuateScope) String() string {
- var sb strings.Builder
- first := true
- if s&EvacuateScopeObjects == EvacuateScopeObjects {
- sb.WriteString("objects")
- first = false
- }
- if s&EvacuateScopeTrees == EvacuateScopeTrees {
- if !first {
- sb.WriteString(";")
- }
- sb.WriteString("trees")
- }
- return sb.String()
-}
-
-func (s EvacuateScope) WithObjects() bool {
- return s&EvacuateScopeObjects == EvacuateScopeObjects
-}
-
-func (s EvacuateScope) WithTrees() bool {
- return s&EvacuateScopeTrees == EvacuateScopeTrees
-}
-
-func (s EvacuateScope) TreesOnly() bool {
- return s == EvacuateScopeTrees
-}
-
// EvacuateShardPrm represents parameters for the EvacuateShard operation.
type EvacuateShardPrm struct {
- ShardID []*shard.ID
- ObjectsHandler func(context.Context, oid.Address, *objectSDK.Object) (bool, error)
- TreeHandler func(context.Context, cid.ID, string, pilorama.Forest) (bool, string, error)
- IgnoreErrors bool
- Scope EvacuateScope
- RepOneOnly bool
-
- ContainerWorkerCount uint32
- ObjectWorkerCount uint32
+ shardID []*shard.ID
+ handler func(context.Context, oid.Address, *objectSDK.Object) error
+ ignoreErrors bool
+ async bool
}
// EvacuateShardRes represents result of the EvacuateShard operation.
type EvacuateShardRes struct {
- objEvacuated *atomic.Uint64
- objTotal *atomic.Uint64
- objFailed *atomic.Uint64
- objSkipped *atomic.Uint64
-
- trEvacuated *atomic.Uint64
- trTotal *atomic.Uint64
- trFailed *atomic.Uint64
+ evacuated *atomic.Uint64
+ total *atomic.Uint64
+ failed *atomic.Uint64
}
// NewEvacuateShardRes creates new EvacuateShardRes instance.
func NewEvacuateShardRes() *EvacuateShardRes {
return &EvacuateShardRes{
- objEvacuated: new(atomic.Uint64),
- objTotal: new(atomic.Uint64),
- objFailed: new(atomic.Uint64),
- objSkipped: new(atomic.Uint64),
- trEvacuated: new(atomic.Uint64),
- trTotal: new(atomic.Uint64),
- trFailed: new(atomic.Uint64),
+ evacuated: new(atomic.Uint64),
+ total: new(atomic.Uint64),
+ failed: new(atomic.Uint64),
}
}
-// ObjectsEvacuated returns amount of evacuated objects.
+// WithShardIDList sets shard ID.
+func (p *EvacuateShardPrm) WithShardIDList(id []*shard.ID) {
+ p.shardID = id
+}
+
+// WithIgnoreErrors sets flag to ignore errors.
+func (p *EvacuateShardPrm) WithIgnoreErrors(ignore bool) {
+ p.ignoreErrors = ignore
+}
+
+// WithFaultHandler sets handler to call for objects which cannot be saved on other shards.
+func (p *EvacuateShardPrm) WithFaultHandler(f func(context.Context, oid.Address, *objectSDK.Object) error) {
+ p.handler = f
+}
+
+// WithAsync sets flag to run evacuate async.
+func (p *EvacuateShardPrm) WithAsync(async bool) {
+ p.async = async
+}
+
+// Evacuated returns amount of evacuated objects.
// Objects for which handler returned no error are also assumed evacuated.
-func (p *EvacuateShardRes) ObjectsEvacuated() uint64 {
+func (p *EvacuateShardRes) Evacuated() uint64 {
if p == nil {
return 0
}
- return p.objEvacuated.Load()
+ return p.evacuated.Load()
}
-// ObjectsTotal returns total count objects to evacuate.
-func (p *EvacuateShardRes) ObjectsTotal() uint64 {
+// Total returns total count objects to evacuate.
+func (p *EvacuateShardRes) Total() uint64 {
if p == nil {
return 0
}
- return p.objTotal.Load()
+ return p.total.Load()
}
-// ObjectsFailed returns count of failed objects to evacuate.
-func (p *EvacuateShardRes) ObjectsFailed() uint64 {
+// Failed returns count of failed objects to evacuate.
+func (p *EvacuateShardRes) Failed() uint64 {
if p == nil {
return 0
}
- return p.objFailed.Load()
-}
-
-// ObjectsSkipped returns count of skipped objects.
-func (p *EvacuateShardRes) ObjectsSkipped() uint64 {
- if p == nil {
- return 0
- }
- return p.objSkipped.Load()
-}
-
-// TreesEvacuated returns amount of evacuated trees.
-func (p *EvacuateShardRes) TreesEvacuated() uint64 {
- if p == nil {
- return 0
- }
- return p.trEvacuated.Load()
-}
-
-// TreesTotal returns total count trees to evacuate.
-func (p *EvacuateShardRes) TreesTotal() uint64 {
- if p == nil {
- return 0
- }
- return p.trTotal.Load()
-}
-
-// TreesFailed returns count of failed trees to evacuate.
-func (p *EvacuateShardRes) TreesFailed() uint64 {
- if p == nil {
- return 0
- }
- return p.trFailed.Load()
+ return p.failed.Load()
}
// DeepCopy returns deep copy of result instance.
@@ -181,52 +104,51 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes {
}
res := &EvacuateShardRes{
- objEvacuated: new(atomic.Uint64),
- objTotal: new(atomic.Uint64),
- objFailed: new(atomic.Uint64),
- objSkipped: new(atomic.Uint64),
- trEvacuated: new(atomic.Uint64),
- trTotal: new(atomic.Uint64),
- trFailed: new(atomic.Uint64),
+ evacuated: new(atomic.Uint64),
+ total: new(atomic.Uint64),
+ failed: new(atomic.Uint64),
}
- res.objEvacuated.Store(p.objEvacuated.Load())
- res.objTotal.Store(p.objTotal.Load())
- res.objFailed.Store(p.objFailed.Load())
- res.objSkipped.Store(p.objSkipped.Load())
- res.trTotal.Store(p.trTotal.Load())
- res.trEvacuated.Store(p.trEvacuated.Load())
- res.trFailed.Store(p.trFailed.Load())
+ res.evacuated.Store(p.evacuated.Load())
+ res.total.Store(p.total.Load())
+ res.failed.Store(p.failed.Load())
return res
}
+const defaultEvacuateBatchSize = 100
+
+type pooledShard struct {
+ hashedShard
+ pool util.WorkerPool
+}
+
var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")
// Evacuate moves data from one shard to the others.
// The shard being moved must be in read-only mode.
-func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) error {
+func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*EvacuateShardRes, error) {
select {
case <-ctx.Done():
- return ctx.Err()
+ return nil, ctx.Err()
default:
}
- shardIDs := make([]string, len(prm.ShardID))
- for i := range prm.ShardID {
- shardIDs[i] = prm.ShardID[i].String()
+ shardIDs := make([]string, len(prm.shardID))
+ for i := range prm.shardID {
+ shardIDs[i] = prm.shardID[i].String()
}
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Evacuate",
trace.WithAttributes(
attribute.StringSlice("shardIDs", shardIDs),
- attribute.Bool("ignoreErrors", prm.IgnoreErrors),
- attribute.Stringer("scope", prm.Scope),
+ attribute.Bool("async", prm.async),
+ attribute.Bool("ignoreErrors", prm.ignoreErrors),
))
defer span.End()
- shards, err := e.getActualShards(shardIDs, prm)
+ shards, weights, err := e.getActualShards(shardIDs, prm.handler != nil)
if err != nil {
- return err
+ return nil, err
}
shardsToEvacuate := make(map[string]*shard.Shard)
@@ -239,36 +161,39 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro
}
res := NewEvacuateShardRes()
- ctx = context.WithoutCancel(ctx)
- eg, ctx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res)
+ ctx = ctxOrBackground(ctx, prm.async)
+ eg, egCtx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res)
+
if err != nil {
- return err
+ return nil, err
}
- var mtx sync.RWMutex
- copyShards := func() []hashedShard {
- mtx.RLock()
- defer mtx.RUnlock()
- t := slices.Clone(shards)
- return t
- }
eg.Go(func() error {
- return e.evacuateShards(ctx, shardIDs, prm, res, copyShards, shardsToEvacuate)
+ return e.evacuateShards(egCtx, shardIDs, prm, res, shards, weights, shardsToEvacuate)
})
- return nil
+ if prm.async {
+ return nil, nil
+ }
+
+ return res, eg.Wait()
+}
+
+func ctxOrBackground(ctx context.Context, background bool) context.Context {
+ if background {
+ return context.Background()
+ }
+ return ctx
}
func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
-) error {
+ shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error {
var err error
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
trace.WithAttributes(
attribute.StringSlice("shardIDs", shardIDs),
- attribute.Bool("ignoreErrors", prm.IgnoreErrors),
- attribute.Stringer("scope", prm.Scope),
- attribute.Bool("repOneOnly", prm.RepOneOnly),
+ attribute.Bool("async", prm.async),
+ attribute.Bool("ignoreErrors", prm.ignoreErrors),
))
defer func() {
@@ -276,524 +201,185 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
e.evacuateLimiter.Complete(err)
}()
- e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
- zap.Stringer("scope", prm.Scope))
+ e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- err = e.getTotals(ctx, prm, shardsToEvacuate, res)
+ err = e.getTotalObjectsCount(ctx, shardsToEvacuate, res)
if err != nil {
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
- zap.Stringer("scope", prm.Scope))
+ e.log.Error(logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
}
- ctx, cancel, egShard, egContainer, egObject := e.createErrorGroupsForEvacuation(ctx, prm)
- continueLoop := true
- for i := 0; continueLoop && i < len(shardIDs); i++ {
- select {
- case <-ctx.Done():
- continueLoop = false
- default:
- egShard.Go(func() error {
- err := e.evacuateShard(ctx, cancel, shardIDs[i], prm, res, shards, shardsToEvacuate, egContainer, egObject)
- if err != nil {
- cancel(err)
- }
- return err
- })
+ for _, shardID := range shardIDs {
+ if err = e.evacuateShard(ctx, shardID, prm, res, shards, weights, shardsToEvacuate); err != nil {
+ e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField)
+ return err
}
}
- err = egShard.Wait()
- if err != nil {
- err = fmt.Errorf("shard error: %w", err)
- }
- errContainer := egContainer.Wait()
- errObject := egObject.Wait()
- if errContainer != nil {
- err = errors.Join(err, fmt.Errorf("container error: %w", errContainer))
- }
- if errObject != nil {
- err = errors.Join(err, fmt.Errorf("object error: %w", errObject))
- }
- if err != nil {
- e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
- zap.Stringer("scope", prm.Scope))
- return err
- }
- e.log.Info(ctx, logs.EngineFinishedSuccessfullyShardsEvacuation,
+ e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation,
zap.Strings("shard_ids", shardIDs),
evacuationOperationLogField,
- zap.Uint64("total_objects", res.ObjectsTotal()),
- zap.Uint64("evacuated_objects", res.ObjectsEvacuated()),
- zap.Uint64("failed_objects", res.ObjectsFailed()),
- zap.Uint64("skipped_objects", res.ObjectsSkipped()),
- zap.Uint64("total_trees", res.TreesTotal()),
- zap.Uint64("evacuated_trees", res.TreesEvacuated()),
- zap.Uint64("failed_trees", res.TreesFailed()),
+ zap.Uint64("total", res.Total()),
+ zap.Uint64("evacuated", res.Evacuated()),
+ zap.Uint64("failed", res.Failed()),
)
return nil
}
-func (e *StorageEngine) createErrorGroupsForEvacuation(ctx context.Context, prm EvacuateShardPrm) (
- context.Context, context.CancelCauseFunc, *errgroup.Group, *errgroup.Group, *errgroup.Group,
-) {
- operationCtx, cancel := context.WithCancelCause(ctx)
- egObject, _ := errgroup.WithContext(operationCtx)
- objectWorkerCount := prm.ObjectWorkerCount
- if objectWorkerCount == 0 {
- objectWorkerCount = objectWorkerCountDefault
- }
- egObject.SetLimit(int(objectWorkerCount))
- egContainer, _ := errgroup.WithContext(operationCtx)
- containerWorkerCount := prm.ContainerWorkerCount
- if containerWorkerCount == 0 {
- containerWorkerCount = containerWorkerCountDefault
- }
- egContainer.SetLimit(int(containerWorkerCount))
- egShard, _ := errgroup.WithContext(operationCtx)
-
- return operationCtx, cancel, egShard, egContainer, egObject
-}
-
-func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotals")
+func (e *StorageEngine) getTotalObjectsCount(ctx context.Context, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotalObjectsCount")
defer span.End()
for _, sh := range shardsToEvacuate {
- if prm.Scope.WithObjects() {
- cnt, err := sh.LogicalObjectsCount(ctx)
- if err != nil {
- if errors.Is(err, shard.ErrDegradedMode) {
- continue
- }
- return err
+ cnt, err := sh.LogicalObjectsCount(ctx)
+ if err != nil {
+ if errors.Is(err, shard.ErrDegradedMode) {
+ continue
}
- res.objTotal.Add(cnt)
- }
- if prm.Scope.WithTrees() && sh.PiloramaEnabled() {
- cnt, err := pilorama.TreeCountAll(ctx, sh)
- if err != nil {
- return err
- }
- res.trTotal.Add(cnt)
+ return err
}
+ res.total.Add(cnt)
}
return nil
}
-func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
- egContainer *errgroup.Group, egObject *errgroup.Group,
-) error {
+func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard",
trace.WithAttributes(
attribute.String("shardID", shardID),
))
defer span.End()
- if prm.Scope.WithObjects() {
- if err := e.evacuateShardObjects(ctx, cancel, shardID, prm, res, shards, shardsToEvacuate, egContainer, egObject); err != nil {
- return err
- }
- }
- if prm.Scope.WithTrees() && shardsToEvacuate[shardID].PiloramaEnabled() {
- if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
- return err
- }
- }
+ var listPrm shard.ListWithCursorPrm
+ listPrm.WithCount(defaultEvacuateBatchSize)
- return nil
-}
-
-func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
- egContainer *errgroup.Group, egObject *errgroup.Group,
-) error {
sh := shardsToEvacuate[shardID]
- var cntPrm shard.IterateOverContainersPrm
- cntPrm.Handler = func(ctx context.Context, objType objectSDK.Type, cnt cid.ID) error {
- select {
- case <-ctx.Done():
- return context.Cause(ctx)
- default:
- }
- egContainer.Go(func() error {
- var skip bool
- c, err := e.containerSource.Load().cs.Get(ctx, cnt)
- if err != nil {
- if client.IsErrContainerNotFound(err) {
- skip = true
- } else {
- return err
- }
- }
- if !skip && prm.RepOneOnly {
- skip = e.isNotRepOne(c)
- }
- if skip {
- countPrm := shard.CountAliveObjectsInContainerPrm{
- ObjectType: objType,
- ContainerID: cnt,
- }
- count, err := sh.CountAliveObjectsInContainer(ctx, countPrm)
- if err != nil {
- return err
- }
- res.objSkipped.Add(count)
- return nil
- }
- var objPrm shard.IterateOverObjectsInContainerPrm
- objPrm.ObjectType = objType
- objPrm.ContainerID = cnt
- objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error {
- select {
- case <-ctx.Done():
- return context.Cause(ctx)
- default:
- }
- egObject.Go(func() error {
- err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate, c.Value)
- if err != nil {
- cancel(err)
- }
- return err
- })
- return nil
- }
- err = sh.IterateOverObjectsInContainer(ctx, objPrm)
- if err != nil {
- cancel(err)
- }
- return err
- })
- return nil
- }
- sh.SetEvacuationInProgress(true)
- err := sh.IterateOverContainers(ctx, cntPrm)
- if err != nil {
- cancel(err)
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField)
- }
- return err
-}
-
-func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
-) error {
- sh := shardsToEvacuate[shardID]
- shards := getShards()
-
- var listPrm pilorama.TreeListTreesPrm
- first := true
-
- for len(listPrm.NextPageToken) > 0 || first {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- first = false
-
- listRes, err := sh.TreeListTrees(ctx, listPrm)
- if err != nil {
- return err
- }
- listPrm.NextPageToken = listRes.NextPageToken
- if err := e.evacuateTrees(ctx, sh, listRes.Items, prm, res, shards, shardsToEvacuate); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID,
- prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
-) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees",
- trace.WithAttributes(
- attribute.Int("trees_count", len(trees)),
- ))
- defer span.End()
-
- for _, contTree := range trees {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- success, shardID, err := e.tryEvacuateTreeLocal(ctx, sh, contTree, prm, shards, shardsToEvacuate)
- if err != nil {
- return err
- }
- if success {
- e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal,
- zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
- zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID),
- evacuationOperationLogField)
- res.trEvacuated.Add(1)
- continue
- }
-
- moved, nodePK, err := e.evacuateTreeToOtherNode(ctx, sh, contTree, prm)
- if err != nil {
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
- zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
- zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
- zap.Error(err))
- return err
- }
- if moved {
- e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote,
- zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID),
- zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK),
- evacuationOperationLogField)
- res.trEvacuated.Add(1)
- } else if prm.IgnoreErrors {
- res.trFailed.Add(1)
- e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
- zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
- zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
- zap.Error(err))
- } else {
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
- zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
- zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
- zap.Error(err))
- return fmt.Errorf("no remote nodes available to replicate tree '%s' of container %s", contTree.TreeID, contTree.CID)
- }
- }
- return nil
-}
-
-func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) {
- if prm.TreeHandler == nil {
- return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
- }
-
- return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh)
-}
-
-func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID,
- prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
-) (bool, string, error) {
- target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate)
- if err != nil {
- return false, "", err
- }
- if !found {
- return false, "", nil
- }
- const readBatchSize = 1000
- source := make(chan *pilorama.Move, readBatchSize)
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- var wg sync.WaitGroup
-
- wg.Add(1)
- var applyErr error
- go func() {
- defer wg.Done()
-
- applyErr = target.TreeApplyStream(ctx, tree.CID, tree.TreeID, source)
- if applyErr != nil {
- cancel()
- }
- }()
-
- var height uint64
+ var c *meta.Cursor
for {
- op, err := sh.TreeGetOpLog(ctx, tree.CID, tree.TreeID, height)
+ listPrm.WithCursor(c)
+
+ // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
+ // because ListWithCursor works only with the metabase.
+ listRes, err := sh.ListWithCursor(ctx, listPrm)
if err != nil {
- cancel()
- wg.Wait()
- close(source) // close after cancel to ctx.Done() hits first
- if prm.IgnoreErrors {
- return false, "", nil
+ if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) {
+ break
}
- return false, "", err
+ e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return err
}
- if op.Time == 0 { // completed get op log
- close(source)
- wg.Wait()
- if applyErr == nil {
- return true, target.ID().String(), nil
- }
- if prm.IgnoreErrors {
- return false, "", nil
- }
- return false, "", applyErr
+ if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, weights, shardsToEvacuate); err != nil {
+ return err
}
- select {
- case <-ctx.Done(): // apply stream failed or operation cancelled
- wg.Wait()
- if prm.IgnoreErrors {
- return false, "", nil
- }
- if applyErr != nil {
- return false, "", applyErr
- }
- return false, "", ctx.Err()
- case source <- &op:
- }
-
- height = op.Time + 1
+ c = listRes.Cursor()
}
+ return nil
}
-// findShardToEvacuateTree returns first shard according HRW or first shard with tree exists.
-func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID,
- shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
-) (hashedShard, bool, error) {
- hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString()))
- var result hashedShard
- var found bool
- for _, target := range shards {
- select {
- case <-ctx.Done():
- return hashedShard{}, false, ctx.Err()
- default:
- }
-
- if _, ok := shardsToEvacuate[target.ID().String()]; ok {
- continue
- }
-
- if !target.PiloramaEnabled() || target.GetMode().ReadOnly() {
- continue
- }
-
- if !found {
- result = target
- found = true
- }
-
- exists, err := target.TreeExists(ctx, tree.CID, tree.TreeID)
- if err != nil {
- continue
- }
- if exists {
- return target, true, nil
- }
- }
- return result, found, nil
-}
-
-func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) {
+func (e *StorageEngine) getActualShards(shardIDs []string, handlerDefined bool) ([]pooledShard, []float64, error) {
e.mtx.RLock()
defer e.mtx.RUnlock()
for i := range shardIDs {
sh, ok := e.shards[shardIDs[i]]
if !ok {
- return nil, errShardNotFound
+ return nil, nil, errShardNotFound
}
if !sh.GetMode().ReadOnly() {
- return nil, ErrMustBeReadOnly
- }
-
- if prm.Scope.TreesOnly() && !sh.PiloramaEnabled() {
- return nil, fmt.Errorf("shard %s doesn't have pilorama enabled", sh.ID())
+ return nil, nil, ErrMustBeReadOnly
}
}
- if len(e.shards)-len(shardIDs) < 1 && prm.ObjectsHandler == nil && prm.Scope.WithObjects() {
- return nil, errMustHaveTwoShards
- }
-
- if len(e.shards)-len(shardIDs) < 1 && prm.TreeHandler == nil && prm.Scope.WithTrees() {
- return nil, errMustHaveTwoShards
+ if len(e.shards)-len(shardIDs) < 1 && !handlerDefined {
+ return nil, nil, errMustHaveTwoShards
}
// We must have all shards, to have correct information about their
// indexes in a sorted slice and set appropriate marks in the metabase.
// Evacuated shard is skipped during put.
- shards := make([]hashedShard, 0, len(e.shards))
+ shards := make([]pooledShard, 0, len(e.shards))
for id := range e.shards {
- shards = append(shards, e.shards[id])
+ shards = append(shards, pooledShard{
+ hashedShard: hashedShard(e.shards[id]),
+ pool: e.shardPools[id],
+ })
}
- return shards, nil
+
+ weights := make([]float64, 0, len(shards))
+ for i := range shards {
+ weights = append(weights, e.shardWeight(shards[i].Shard))
+ }
+
+ return shards, weights, nil
}
-func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
- getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container,
-) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects")
+func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.AddressWithType, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects",
+ trace.WithAttributes(
+ attribute.Int("objects_count", len(toEvacuate)),
+ ))
defer span.End()
- select {
- case <-ctx.Done():
- return context.Cause(ctx)
- default:
- }
-
- shards := getShards()
- addr := objInfo.Address
-
- var getPrm shard.GetPrm
- getPrm.SetAddress(addr)
- getPrm.SkipEvacCheck(true)
-
- getRes, err := shardsToEvacuate[shardID].Get(ctx, getPrm)
- if err != nil {
- if prm.IgnoreErrors {
- res.objFailed.Add(1)
- return nil
+ for i := range toEvacuate {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
}
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
- return err
- }
+ addr := toEvacuate[i].Address
- evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res, cnr)
- if err != nil {
- return err
- }
+ var getPrm shard.GetPrm
+ getPrm.SetAddress(addr)
- if evacuatedLocal {
- return nil
- }
+ getRes, err := sh.Get(ctx, getPrm)
+ if err != nil {
+ if prm.ignoreErrors {
+ res.failed.Add(1)
+ continue
+ }
+ e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return err
+ }
- if prm.ObjectsHandler == nil {
- // Do not check ignoreErrors flag here because
- // ignoring errors on put make this command kinda useless.
- return fmt.Errorf("%w: %s", errPutShard, objInfo)
- }
+ evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, shards, weights, shardsToEvacuate, res)
+ if err != nil {
+ return err
+ }
- moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
- if err != nil {
- e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
- return err
- }
- if moved {
- res.objEvacuated.Add(1)
- } else if prm.IgnoreErrors {
- res.objFailed.Add(1)
- e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField)
- } else {
- return fmt.Errorf("object %s was not replicated", addr)
+ if evacuatedLocal {
+ continue
+ }
+
+ if prm.handler == nil {
+ // Do not check ignoreErrors flag here because
+ // ignoring errors on put make this command kinda useless.
+ return fmt.Errorf("%w: %s", errPutShard, toEvacuate[i])
+ }
+
+ err = prm.handler(ctx, addr, getRes.Object())
+ if err != nil {
+ e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return err
+ }
+ res.evacuated.Add(1)
}
return nil
}
-func (e *StorageEngine) isNotRepOne(c *container.Container) bool {
- p := c.Value.PlacementPolicy()
- for i := range p.NumberOfReplicas() {
- if p.ReplicaDescriptor(i).NumberOfObjects() > 1 {
- return true
- }
- }
- return false
-}
-
func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
- shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container,
-) (bool, error) {
- hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString()))
+ shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) (bool, error) {
+ hrw.SortHasherSliceByWeightValue(shards, weights, hrw.StringHash(addr.EncodeToString()))
for j := range shards {
select {
case <-ctx.Done():
@@ -804,20 +390,18 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add
if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
continue
}
- switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status {
- case putToShardSuccess:
- res.objEvacuated.Add(1)
- e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard,
- zap.Stringer("from", sh.ID()),
- zap.Stringer("to", shards[j].ID()),
- zap.Stringer("addr", addr),
- evacuationOperationLogField)
+ putDone, exists := e.putToShard(ctx, shards[j].hashedShard, j, shards[j].pool, addr, object)
+ if putDone || exists {
+ if putDone {
+ res.evacuated.Add(1)
+ e.log.Debug(logs.EngineObjectIsMovedToAnotherShard,
+ zap.Stringer("from", sh.ID()),
+ zap.Stringer("to", shards[j].ID()),
+ zap.Stringer("addr", addr),
+ evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ }
return true, nil
- case putToShardExists, putToShardRemoved:
- res.objSkipped.Add(1)
- return true, nil
- default:
- continue
}
}
@@ -843,21 +427,3 @@ func (e *StorageEngine) EnqueRunningEvacuationStop(ctx context.Context) error {
return e.evacuateLimiter.CancelIfRunning()
}
-
-func (e *StorageEngine) ResetEvacuationStatus(ctx context.Context) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- return e.evacuateLimiter.ResetEvacuationStatus()
-}
-
-func (e *StorageEngine) ResetEvacuationStatusForShards() {
- e.mtx.RLock()
- defer e.mtx.RUnlock()
- for _, sh := range e.shards {
- sh.SetEvacuationInProgress(false)
- }
-}
diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go
index b75e8686d..62795fa1a 100644
--- a/pkg/local_object_storage/engine/evacuate_limiter.go
+++ b/pkg/local_object_storage/engine/evacuate_limiter.go
@@ -3,7 +3,6 @@ package engine
import (
"context"
"fmt"
- "slices"
"sync"
"time"
@@ -35,53 +34,25 @@ func (s *EvacuationState) ShardIDs() []string {
return s.shardIDs
}
-func (s *EvacuationState) ObjectsEvacuated() uint64 {
+func (s *EvacuationState) Evacuated() uint64 {
if s == nil {
return 0
}
- return s.result.ObjectsEvacuated()
+ return s.result.Evacuated()
}
-func (s *EvacuationState) ObjectsTotal() uint64 {
+func (s *EvacuationState) Total() uint64 {
if s == nil {
return 0
}
- return s.result.ObjectsTotal()
+ return s.result.Total()
}
-func (s *EvacuationState) ObjectsFailed() uint64 {
+func (s *EvacuationState) Failed() uint64 {
if s == nil {
return 0
}
- return s.result.ObjectsFailed()
-}
-
-func (s *EvacuationState) ObjectsSkipped() uint64 {
- if s == nil {
- return 0
- }
- return s.result.ObjectsSkipped()
-}
-
-func (s *EvacuationState) TreesEvacuated() uint64 {
- if s == nil {
- return 0
- }
- return s.result.TreesEvacuated()
-}
-
-func (s *EvacuationState) TreesTotal() uint64 {
- if s == nil {
- return 0
- }
- return s.result.TreesTotal()
-}
-
-func (s *EvacuationState) TreesFailed() uint64 {
- if s == nil {
- return 0
- }
- return s.result.TreesFailed()
+ return s.result.Failed()
}
func (s *EvacuationState) ProcessingStatus() EvacuateProcessState {
@@ -95,7 +66,8 @@ func (s *EvacuationState) StartedAt() *time.Time {
if s == nil {
return nil
}
- if s.startedAt.IsZero() {
+ defaultTime := time.Time{}
+ if s.startedAt == defaultTime {
return nil
}
return &s.startedAt
@@ -105,7 +77,8 @@ func (s *EvacuationState) FinishedAt() *time.Time {
if s == nil {
return nil
}
- if s.finishedAt.IsZero() {
+ defaultTime := time.Time{}
+ if s.finishedAt == defaultTime {
return nil
}
return &s.finishedAt
@@ -122,7 +95,8 @@ func (s *EvacuationState) DeepCopy() *EvacuationState {
if s == nil {
return nil
}
- shardIDs := slices.Clone(s.shardIDs)
+ shardIDs := make([]string, len(s.shardIDs))
+ copy(shardIDs, s.shardIDs)
return &EvacuationState{
shardIDs: shardIDs,
@@ -139,8 +113,7 @@ type evacuationLimiter struct {
eg *errgroup.Group
cancel context.CancelFunc
- guard *sync.RWMutex
- statusCond *sync.Cond // used in unit tests
+ guard sync.RWMutex
}
func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, context.Context, error) {
@@ -166,7 +139,6 @@ func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, res
startedAt: time.Now().UTC(),
result: result,
}
- l.statusCond.Broadcast()
return l.eg, egCtx, nil
}
@@ -182,7 +154,6 @@ func (l *evacuationLimiter) Complete(err error) {
l.state.processState = EvacuateProcessStateCompleted
l.state.errMessage = errMsq
l.state.finishedAt = time.Now().UTC()
- l.statusCond.Broadcast()
l.eg = nil
}
@@ -205,19 +176,3 @@ func (l *evacuationLimiter) CancelIfRunning() error {
l.cancel()
return nil
}
-
-func (l *evacuationLimiter) ResetEvacuationStatus() error {
- l.guard.Lock()
- defer l.guard.Unlock()
-
- if l.state.processState == EvacuateProcessStateRunning {
- return logicerr.New("there is running evacuation task")
- }
-
- l.state = EvacuationState{}
- l.eg = nil
- l.cancel = nil
- l.statusCond.Broadcast()
-
- return nil
-}
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index f2ba7d994..c0c05d661 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -6,60 +6,31 @@ import (
"fmt"
"path/filepath"
"strconv"
- "sync"
- "sync/atomic"
"testing"
"time"
- coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
-type containerStorage struct {
- cntmap map[cid.ID]*container.Container
- latency time.Duration
-}
-
-func (cs *containerStorage) Get(ctx context.Context, id cid.ID) (*coreContainer.Container, error) {
- time.Sleep(cs.latency)
- v, ok := cs.cntmap[id]
- if !ok {
- return nil, new(apistatus.ContainerNotFound)
- }
- coreCnt := coreContainer.Container{
- Value: *v,
- }
- return &coreCnt, nil
-}
-
-func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) {
- return nil, nil
-}
-
func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) {
dir := t.TempDir()
te := testNewEngine(t).
setShardsNumOpts(t, shardNum, func(id int) []shard.Option {
return []shard.Option{
- shard.WithLogger(test.NewLogger(t)),
+ shard.WithLogger(test.NewLogger(t, true)),
shard.WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{{
Storage: fstree.New(
@@ -68,58 +39,50 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
}})),
shard.WithMetaBaseOptions(
meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),
- meta.WithPermissions(0o700),
+ meta.WithPermissions(0700),
meta.WithEpochState(epochState{})),
- shard.WithPiloramaOptions(
- pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
- pilorama.WithPerm(0o700),
- ),
}
- }).
- prepare(t)
+ })
e, ids := te.engine, te.shardIDs
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
objects := make([]*objectSDK.Object, 0, objPerShard*len(ids))
- treeID := "version"
- meta := []pilorama.KeyValue{
- {Key: pilorama.AttributeVersion, Value: []byte("XXX")},
- {Key: pilorama.AttributeFilename, Value: []byte("file.txt")},
- }
- cnrMap := make(map[cid.ID]*container.Container)
+
for _, sh := range ids {
- for i := range objPerShard {
- // Create dummy container
- cnr1 := container.Container{}
- cnr1.SetAttribute("cnr", "cnr"+strconv.Itoa(i))
- contID := cidtest.ID()
- cnrMap[contID] = &cnr1
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ objects = append(objects, obj)
- obj := testutil.GenerateObjectWithCID(contID)
- objects = append(objects, obj)
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ _, err := e.shards[sh.String()].Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
- var putPrm shard.PutPrm
- putPrm.SetObject(obj)
- _, err := e.shards[sh.String()].Put(context.Background(), putPrm)
- require.NoError(t, err)
+ for i := 0; ; i++ {
+ objects = append(objects, testutil.GenerateObjectWithCID(cidtest.ID()))
- _, err = e.shards[sh.String()].TreeAddByPath(context.Background(), pilorama.CIDDescriptor{CID: contID, Position: 0, Size: 1},
- treeID, pilorama.AttributeFilename, []string{"path", "to", "the", "file"}, meta)
- require.NoError(t, err)
+ var putPrm PutPrm
+ putPrm.WithObject(objects[len(objects)-1])
+
+ err := e.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ res, err := e.shards[ids[len(ids)-1].String()].List(context.Background())
+ require.NoError(t, err)
+ if len(res.AddressList()) == objPerShard {
+ break
}
}
- e.SetContainerSource(&containerStorage{cntmap: cnrMap})
return e, ids, objects
}
-func TestEvacuateShardObjects(t *testing.T) {
+func TestEvacuateShard(t *testing.T) {
t.Parallel()
const objPerShard = 3
e, ids, objects := newEngineEvacuate(t, 3, objPerShard)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
evacuateShardID := ids[2].String()
@@ -136,21 +99,19 @@ func TestEvacuateShardObjects(t *testing.T) {
checkHasObjects(t)
var prm EvacuateShardPrm
- prm.ShardID = ids[2:3]
- prm.Scope = EvacuateScopeObjects
+ prm.WithShardIDList(ids[2:3])
t.Run("must be read-only", func(t *testing.T) {
- err := e.Evacuate(context.Background(), prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, ErrMustBeReadOnly)
+ require.Equal(t, uint64(0), res.Evacuated())
})
- require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly))
- err := e.Evacuate(context.Background(), prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.NoError(t, err)
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, st.ErrorMessage(), "")
- require.Equal(t, uint64(objPerShard), st.ObjectsEvacuated())
+ require.Equal(t, uint64(objPerShard), res.Evacuated())
// We check that all objects are available both before and after shard removal.
// First case is a real-world use-case. It ensures that an object can be put in presense
@@ -158,79 +119,38 @@ func TestEvacuateShardObjects(t *testing.T) {
// Second case ensures that all objects are indeed moved and available.
checkHasObjects(t)
- // Objects on evacuated shards should be logically unavailable, but persisted on disk.
- // This is necessary to prevent removing it by policer in case of `REP 1` policy.
- for _, obj := range objects[len(objects)-objPerShard:] {
- var prmGet shard.GetPrm
- prmGet.SetAddress(objectCore.AddressOf(obj))
- _, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
- require.Error(t, err)
-
- prmGet.SkipEvacCheck(true)
- _, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
- require.NoError(t, err)
-
- var prmHead shard.HeadPrm
- prmHead.SetAddress(objectCore.AddressOf(obj))
- _, err = e.shards[evacuateShardID].Head(context.Background(), prmHead)
- require.Error(t, err)
-
- var existsPrm shard.ExistsPrm
- existsPrm.Address = objectCore.AddressOf(obj)
- _, err = e.shards[evacuateShardID].Exists(context.Background(), existsPrm)
- require.Error(t, err)
-
- var rngPrm shard.RngPrm
- rngPrm.SetAddress(objectCore.AddressOf(obj))
- _, err = e.shards[evacuateShardID].GetRange(context.Background(), rngPrm)
- require.Error(t, err)
- }
-
// Calling it again is OK, but all objects are already moved, so no new PUTs should be done.
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st = testWaitForEvacuationCompleted(t, e)
- require.Equal(t, st.ErrorMessage(), "")
- require.Equal(t, uint64(0), st.ObjectsEvacuated())
+ res, err = e.Evacuate(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), res.Evacuated())
checkHasObjects(t)
e.mtx.Lock()
delete(e.shards, evacuateShardID)
+ delete(e.shardPools, evacuateShardID)
e.mtx.Unlock()
checkHasObjects(t)
}
-func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState {
- var st *EvacuationState
- var err error
- e.evacuateLimiter.waitForCompleted()
- st, err = e.GetEvacuationState(context.Background())
- require.NoError(t, err)
- require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
- return st
-}
-
-func TestEvacuateObjectsNetwork(t *testing.T) {
+func TestEvacuateNetwork(t *testing.T) {
t.Parallel()
- errReplication := errors.New("handler error")
+ var errReplication = errors.New("handler error")
- acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
- var n atomic.Uint64
- var mtx sync.Mutex
- return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
- mtx.Lock()
- defer mtx.Unlock()
- if n.Load() == max {
- return false, errReplication
+ acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) error {
+ var n uint64
+ return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) error {
+ if n == max {
+ return errReplication
}
- n.Add(1)
+ n++
for i := range objects {
if addr == objectCore.AddressOf(objects[i]) {
require.Equal(t, objects[i], obj)
- return true, nil
+ return nil
}
}
require.FailNow(t, "handler was called with an unexpected object: %s", addr)
@@ -241,64 +161,49 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
t.Run("single shard", func(t *testing.T) {
t.Parallel()
e, ids, objects := newEngineEvacuate(t, 1, 3)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
-
evacuateShardID := ids[0].String()
- require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly))
var prm EvacuateShardPrm
- prm.ShardID = ids[0:1]
- prm.Scope = EvacuateScopeObjects
+ prm.shardID = ids[0:1]
- err := e.Evacuate(context.Background(), prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, errMustHaveTwoShards)
+ require.Equal(t, uint64(0), res.Evacuated())
- prm.ObjectsHandler = acceptOneOf(objects, 2)
+ prm.handler = acceptOneOf(objects, 2)
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Contains(t, st.ErrorMessage(), errReplication.Error())
- require.Equal(t, uint64(2), st.ObjectsEvacuated())
+ res, err = e.Evacuate(context.Background(), prm)
+ require.ErrorIs(t, err, errReplication)
+ require.Equal(t, uint64(2), res.Evacuated())
})
t.Run("multiple shards, evacuate one", func(t *testing.T) {
t.Parallel()
e, ids, objects := newEngineEvacuate(t, 2, 3)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
var prm EvacuateShardPrm
- prm.ShardID = ids[1:2]
- prm.ObjectsHandler = acceptOneOf(objects, 2)
- prm.Scope = EvacuateScopeObjects
+ prm.shardID = ids[1:2]
+ prm.handler = acceptOneOf(objects, 2)
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Contains(t, st.ErrorMessage(), errReplication.Error())
- require.Equal(t, uint64(2), st.ObjectsEvacuated())
+ res, err := e.Evacuate(context.Background(), prm)
+ require.ErrorIs(t, err, errReplication)
+ require.Equal(t, uint64(2), res.Evacuated())
t.Run("no errors", func(t *testing.T) {
- prm.ObjectsHandler = acceptOneOf(objects, 3)
+ prm.handler = acceptOneOf(objects, 3)
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, st.ErrorMessage(), "")
- require.Equal(t, uint64(3), st.ObjectsEvacuated())
+ res, err := e.Evacuate(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(3), res.Evacuated())
})
})
t.Run("multiple shards, evacuate many", func(t *testing.T) {
t.Parallel()
e, ids, objects := newEngineEvacuate(t, 4, 5)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
-
evacuateIDs := ids[0:3]
var totalCount uint64
@@ -310,26 +215,23 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
}
for i := range ids {
- require.NoError(t, e.shards[ids[i].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[i].String()].SetMode(mode.ReadOnly))
}
var prm EvacuateShardPrm
- prm.ShardID = evacuateIDs
- prm.ObjectsHandler = acceptOneOf(objects, totalCount-1)
- prm.Scope = EvacuateScopeObjects
+ prm.shardID = evacuateIDs
+ prm.handler = acceptOneOf(objects, totalCount-1)
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Contains(t, st.ErrorMessage(), errReplication.Error())
- require.Equal(t, totalCount-1, st.ObjectsEvacuated())
+ res, err := e.Evacuate(context.Background(), prm)
+ require.ErrorIs(t, err, errReplication)
+ require.Equal(t, totalCount-1, res.Evacuated())
t.Run("no errors", func(t *testing.T) {
- prm.ObjectsHandler = acceptOneOf(objects, totalCount)
+ prm.handler = acceptOneOf(objects, totalCount)
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, st.ErrorMessage(), "")
- require.Equal(t, totalCount, st.ObjectsEvacuated())
+ res, err := e.Evacuate(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, totalCount, res.Evacuated())
})
})
}
@@ -337,134 +239,93 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
func TestEvacuateCancellation(t *testing.T) {
t.Parallel()
e, ids, _ := newEngineEvacuate(t, 2, 3)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
var prm EvacuateShardPrm
- prm.ShardID = ids[1:2]
- prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
+ prm.shardID = ids[1:2]
+ prm.handler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) error {
select {
case <-ctx.Done():
- return false, ctx.Err()
+ return ctx.Err()
default:
}
- return true, nil
+ return nil
}
- prm.Scope = EvacuateScopeObjects
ctx, cancel := context.WithCancel(context.Background())
cancel()
- err := e.Evacuate(ctx, prm)
+ res, err := e.Evacuate(ctx, prm)
require.ErrorContains(t, err, "context canceled")
-}
-
-func TestEvacuateCancellationByError(t *testing.T) {
- t.Parallel()
- e, ids, _ := newEngineEvacuate(t, 2, 10)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
-
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
-
- var prm EvacuateShardPrm
- prm.ShardID = ids[1:2]
- var once atomic.Bool
- prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
- var err error
- flag := true
- if once.CompareAndSwap(false, true) {
- err = errors.New("test error")
- flag = false
- }
- return flag, err
- }
- prm.Scope = EvacuateScopeObjects
- prm.ObjectWorkerCount = 2
- prm.ContainerWorkerCount = 2
-
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Contains(t, st.ErrorMessage(), "test error")
+ require.Equal(t, uint64(0), res.Evacuated())
}
func TestEvacuateSingleProcess(t *testing.T) {
e, ids, _ := newEngineEvacuate(t, 2, 3)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
- blocker := make(chan any)
- running := make(chan any)
+ blocker := make(chan interface{})
+ running := make(chan interface{})
var prm EvacuateShardPrm
- prm.ShardID = ids[1:2]
- prm.Scope = EvacuateScopeObjects
- prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
+ prm.shardID = ids[1:2]
+ prm.handler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) error {
select {
case <-running:
default:
close(running)
}
<-blocker
- return true, nil
+ return nil
}
eg, egCtx := errgroup.WithContext(context.Background())
eg.Go(func() error {
- require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed")
+ res, err := e.Evacuate(egCtx, prm)
+ require.NoError(t, err, "first evacuation failed")
+ require.Equal(t, uint64(3), res.Evacuated())
return nil
})
eg.Go(func() error {
<-running
- require.ErrorContains(t, e.Evacuate(egCtx, prm), "evacuate is already running for shard ids", "second evacuation not failed")
+ res, err := e.Evacuate(egCtx, prm)
+ require.ErrorContains(t, err, "evacuate is already running for shard ids", "second evacuation not failed")
+ require.Equal(t, uint64(0), res.Evacuated())
close(blocker)
return nil
})
require.NoError(t, eg.Wait())
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, uint64(3), st.ObjectsEvacuated())
- require.Equal(t, st.ErrorMessage(), "")
}
-func TestEvacuateObjectsAsync(t *testing.T) {
+func TestEvacuateAsync(t *testing.T) {
e, ids, _ := newEngineEvacuate(t, 2, 3)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
- blocker := make(chan any)
- running := make(chan any)
+ blocker := make(chan interface{})
+ running := make(chan interface{})
var prm EvacuateShardPrm
- prm.ShardID = ids[1:2]
- prm.Scope = EvacuateScopeObjects
- prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
+ prm.shardID = ids[1:2]
+ prm.handler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) error {
select {
case <-running:
default:
close(running)
}
<-blocker
- return true, nil
+ return nil
}
st, err := e.GetEvacuationState(context.Background())
require.NoError(t, err, "get init state failed")
require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
- require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid init count")
+ require.Equal(t, uint64(0), st.Evacuated(), "invalid init count")
require.Nil(t, st.StartedAt(), "invalid init started at")
require.Nil(t, st.FinishedAt(), "invalid init finished at")
require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
@@ -472,9 +333,9 @@ func TestEvacuateObjectsAsync(t *testing.T) {
eg, egCtx := errgroup.WithContext(context.Background())
eg.Go(func() error {
- require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed")
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count")
+ res, err := e.Evacuate(egCtx, prm)
+ require.NoError(t, err, "first evacuation failed")
+ require.Equal(t, uint64(3), res.Evacuated())
return nil
})
@@ -483,7 +344,7 @@ func TestEvacuateObjectsAsync(t *testing.T) {
st, err = e.GetEvacuationState(context.Background())
require.NoError(t, err, "get running state failed")
require.Equal(t, EvacuateProcessStateRunning, st.ProcessingStatus(), "invalid running state")
- require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid running count")
+ require.Equal(t, uint64(0), st.Evacuated(), "invalid running count")
require.NotNil(t, st.StartedAt(), "invalid running started at")
require.Nil(t, st.FinishedAt(), "invalid init finished at")
expectedShardIDs := make([]string, 0, 2)
@@ -493,335 +354,19 @@ func TestEvacuateObjectsAsync(t *testing.T) {
require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid running shard ids")
require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
- require.Error(t, e.ResetEvacuationStatus(context.Background()))
-
close(blocker)
- st = testWaitForEvacuationCompleted(t, e)
- require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count")
+ require.Eventually(t, func() bool {
+ st, err = e.GetEvacuationState(context.Background())
+ return st.ProcessingStatus() == EvacuateProcessStateCompleted
+ }, 3*time.Second, 10*time.Millisecond, "invalid final state")
+
+ require.NoError(t, err, "get final state failed")
+ require.Equal(t, uint64(3), st.Evacuated(), "invalid final count")
require.NotNil(t, st.StartedAt(), "invalid final started at")
require.NotNil(t, st.FinishedAt(), "invalid final finished at")
require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
require.NoError(t, eg.Wait())
-
- require.NoError(t, e.ResetEvacuationStatus(context.Background()))
- st, err = e.GetEvacuationState(context.Background())
- require.NoError(t, err, "get state after reset failed")
- require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid state after reset")
- require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid count after reset")
- require.Nil(t, st.StartedAt(), "invalid started at after reset")
- require.Nil(t, st.FinishedAt(), "invalid finished at after reset")
- require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid shard ids after reset")
- require.Equal(t, "", st.ErrorMessage(), "invalid error message after reset")
-}
-
-func TestEvacuateTreesLocal(t *testing.T) {
- e, ids, _ := newEngineEvacuate(t, 2, 3)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
-
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
-
- var prm EvacuateShardPrm
- prm.ShardID = ids[0:1]
- prm.Scope = EvacuateScopeTrees
-
- expectedShardIDs := make([]string, 0, 1)
- for _, id := range ids[0:1] {
- expectedShardIDs = append(expectedShardIDs, id.String())
- }
-
- st, err := e.GetEvacuationState(context.Background())
- require.NoError(t, err, "get init state failed")
- require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
- require.Equal(t, uint64(0), st.TreesEvacuated(), "invalid init count")
- require.Nil(t, st.StartedAt(), "invalid init started at")
- require.Nil(t, st.FinishedAt(), "invalid init finished at")
- require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
- require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
-
- require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed")
-
- st = testWaitForEvacuationCompleted(t, e)
- require.Equal(t, uint64(3), st.TreesTotal(), "invalid trees total count")
- require.Equal(t, uint64(3), st.TreesEvacuated(), "invalid trees evacuated count")
- require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
- require.NotNil(t, st.StartedAt(), "invalid final started at")
- require.NotNil(t, st.FinishedAt(), "invalid final finished at")
- require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
- require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
-
- sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[0].String()])
- require.NoError(t, err, "list source trees failed")
- require.Len(t, sourceTrees, 3)
-
- for _, tr := range sourceTrees {
- exists, err := e.shards[ids[1].String()].TreeExists(context.Background(), tr.CID, tr.TreeID)
- require.NoError(t, err, "failed to check tree existance")
- require.True(t, exists, "tree doesn't exists on target shard")
-
- var height uint64
- var sourceOps []pilorama.Move
- for {
- op, err := e.shards[ids[0].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
- require.NoError(t, err)
- if op.Time == 0 {
- break
- }
- sourceOps = append(sourceOps, op)
- height = op.Time + 1
- }
-
- height = 0
- var targetOps []pilorama.Move
- for {
- op, err := e.shards[ids[1].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
- require.NoError(t, err)
- if op.Time == 0 {
- break
- }
- targetOps = append(targetOps, op)
- height = op.Time + 1
- }
-
- require.Equal(t, sourceOps, targetOps)
- }
-}
-
-func TestEvacuateTreesRemote(t *testing.T) {
- e, ids, _ := newEngineEvacuate(t, 2, 3)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
-
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))
-
- mutex := sync.Mutex{}
- evacuatedTreeOps := make(map[string][]*pilorama.Move)
- var prm EvacuateShardPrm
- prm.ShardID = ids
- prm.Scope = EvacuateScopeTrees
- prm.TreeHandler = func(ctx context.Context, contID cid.ID, treeID string, f pilorama.Forest) (bool, string, error) {
- key := contID.String() + treeID
- var height uint64
- for {
- op, err := f.TreeGetOpLog(ctx, contID, treeID, height)
- require.NoError(t, err)
-
- if op.Time == 0 {
- return true, "", nil
- }
- mutex.Lock()
- evacuatedTreeOps[key] = append(evacuatedTreeOps[key], &op)
- mutex.Unlock()
- height = op.Time + 1
- }
- }
-
- expectedShardIDs := make([]string, 0, len(ids))
- for _, id := range ids {
- expectedShardIDs = append(expectedShardIDs, id.String())
- }
-
- st, err := e.GetEvacuationState(context.Background())
- require.NoError(t, err, "get init state failed")
- require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
- require.Equal(t, uint64(0), st.TreesEvacuated(), "invalid init count")
- require.Nil(t, st.StartedAt(), "invalid init started at")
- require.Nil(t, st.FinishedAt(), "invalid init finished at")
- require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
- require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
-
- require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed")
- st = testWaitForEvacuationCompleted(t, e)
-
- require.Equal(t, uint64(6), st.TreesTotal(), "invalid trees total count")
- require.Equal(t, uint64(6), st.TreesEvacuated(), "invalid trees evacuated count")
- require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
- require.NotNil(t, st.StartedAt(), "invalid final started at")
- require.NotNil(t, st.FinishedAt(), "invalid final finished at")
- require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
- require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
-
- expectedTreeOps := make(map[string][]*pilorama.Move)
- for i := range len(e.shards) {
- sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()])
- require.NoError(t, err, "list source trees failed")
- require.Len(t, sourceTrees, 3)
-
- for _, tr := range sourceTrees {
- key := tr.CID.String() + tr.TreeID
- var height uint64
- for {
- op, err := e.shards[ids[i].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
- require.NoError(t, err)
-
- if op.Time == 0 {
- break
- }
- expectedTreeOps[key] = append(expectedTreeOps[key], &op)
- height = op.Time + 1
- }
- }
- }
-
- require.Equal(t, expectedTreeOps, evacuatedTreeOps)
-}
-
-func TestEvacuateShardObjectsRepOneOnly(t *testing.T) {
- e, ids, _ := newEngineEvacuate(t, 2, 0)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
-
- // Create container with policy REP 2
- cnr1 := container.Container{}
- p1 := netmap.PlacementPolicy{}
- p1.SetContainerBackupFactor(1)
- x1 := netmap.ReplicaDescriptor{}
- x1.SetNumberOfObjects(2)
- p1.AddReplicas(x1)
- x1 = netmap.ReplicaDescriptor{}
- x1.SetNumberOfObjects(1)
- p1.AddReplicas(x1)
- cnr1.SetPlacementPolicy(p1)
- cnr1.SetAttribute("cnr", "cnr1")
-
- var idCnr1 cid.ID
- container.CalculateID(&idCnr1, cnr1)
-
- cnrmap := make(map[cid.ID]*container.Container)
- var cids []cid.ID
- cnrmap[idCnr1] = &cnr1
- cids = append(cids, idCnr1)
-
- // Create container with policy REP 1
- cnr2 := container.Container{}
- p2 := netmap.PlacementPolicy{}
- p2.SetContainerBackupFactor(1)
- x2 := netmap.ReplicaDescriptor{}
- x2.SetNumberOfObjects(1)
- p2.AddReplicas(x2)
- x2 = netmap.ReplicaDescriptor{}
- x2.SetNumberOfObjects(1)
- p2.AddReplicas(x2)
- cnr2.SetPlacementPolicy(p2)
- cnr2.SetAttribute("cnr", "cnr2")
-
- var idCnr2 cid.ID
- container.CalculateID(&idCnr2, cnr2)
- cnrmap[idCnr2] = &cnr2
- cids = append(cids, idCnr2)
-
- // Create container for simulate removing
- cnr3 := container.Container{}
- p3 := netmap.PlacementPolicy{}
- p3.SetContainerBackupFactor(1)
- x3 := netmap.ReplicaDescriptor{}
- x3.SetNumberOfObjects(1)
- p3.AddReplicas(x3)
- cnr3.SetPlacementPolicy(p3)
- cnr3.SetAttribute("cnr", "cnr3")
-
- var idCnr3 cid.ID
- container.CalculateID(&idCnr3, cnr3)
- cids = append(cids, idCnr3)
-
- e.SetContainerSource(&containerStorage{cntmap: cnrmap})
-
- for _, sh := range ids {
- for j := range 3 {
- for range 4 {
- obj := testutil.GenerateObjectWithCID(cids[j])
- var putPrm shard.PutPrm
- putPrm.SetObject(obj)
- _, err := e.shards[sh.String()].Put(context.Background(), putPrm)
- require.NoError(t, err)
- }
- }
- }
-
- var prm EvacuateShardPrm
- prm.ShardID = ids[0:1]
- prm.Scope = EvacuateScopeObjects
- prm.RepOneOnly = true
-
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
-
- require.NoError(t, e.Evacuate(context.Background(), prm))
- st := testWaitForEvacuationCompleted(t, e)
- require.Equal(t, "", st.ErrorMessage())
- require.Equal(t, uint64(4), st.ObjectsEvacuated())
- require.Equal(t, uint64(8), st.ObjectsSkipped())
- require.Equal(t, uint64(0), st.ObjectsFailed())
-}
-
-func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) {
- t.Skip()
- e, ids, _ := newEngineEvacuate(t, 2, 0)
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
-
- cnrmap := make(map[cid.ID]*container.Container)
- var cids []cid.ID
- // Create containers with policy REP 1
- for i := range 10_000 {
- cnr1 := container.Container{}
- p1 := netmap.PlacementPolicy{}
- p1.SetContainerBackupFactor(1)
- x1 := netmap.ReplicaDescriptor{}
- x1.SetNumberOfObjects(2)
- p1.AddReplicas(x1)
- cnr1.SetPlacementPolicy(p1)
- cnr1.SetAttribute("i", strconv.Itoa(i))
-
- var idCnr1 cid.ID
- container.CalculateID(&idCnr1, cnr1)
-
- cnrmap[idCnr1] = &cnr1
- cids = append(cids, idCnr1)
- }
-
- e.SetContainerSource(&containerStorage{
- cntmap: cnrmap,
- latency: time.Millisecond * 100,
- })
-
- for _, cnt := range cids {
- for range 1 {
- obj := testutil.GenerateObjectWithCID(cnt)
- var putPrm shard.PutPrm
- putPrm.SetObject(obj)
- _, err := e.shards[ids[0].String()].Put(context.Background(), putPrm)
- require.NoError(t, err)
- }
- }
-
- var prm EvacuateShardPrm
- prm.ShardID = ids[0:1]
- prm.Scope = EvacuateScopeObjects
- prm.RepOneOnly = true
- prm.ContainerWorkerCount = 10
-
- require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
-
- start := time.Now()
- err := e.Evacuate(context.Background(), prm)
- testWaitForEvacuationCompleted(t, e)
- t.Logf("evacuate took %v\n", time.Since(start))
- require.NoError(t, err)
-}
-
-func (l *evacuationLimiter) waitForCompleted() {
- l.guard.Lock()
- defer l.guard.Unlock()
-
- for l.state.processState != EvacuateProcessStateCompleted {
- l.statusCond.Wait()
- }
}
diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go
index 7dac9eb97..ef6292768 100644
--- a/pkg/local_object_storage/engine/exists.go
+++ b/pkg/local_object_storage/engine/exists.go
@@ -8,17 +8,16 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "go.uber.org/zap"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
-// exists return in the first value true if object exists.
-// Second return value marks is parent object locked.
-func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool, bool, error) {
+func (e *StorageEngine) exists(ctx context.Context, addr oid.Address) (bool, error) {
+ var shPrm shard.ExistsPrm
+ shPrm.SetAddress(addr)
alreadyRemoved := false
exists := false
- locked := false
- if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) {
+ e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Exists(ctx, shPrm)
if err != nil {
if client.IsErrObjectAlreadyRemoved(err) {
@@ -37,7 +36,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool
}
if !client.IsErrObjectNotFound(err) {
- e.reportShardError(ctx, sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address))
+ e.reportShardError(sh, "could not check existence of object in shard", err)
}
return false
}
@@ -45,18 +44,13 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool
if !exists {
exists = res.Exists()
}
- if !locked {
- locked = res.Locked()
- }
return false
- }); err != nil {
- return false, false, err
- }
+ })
if alreadyRemoved {
- return false, false, new(apistatus.ObjectAlreadyRemoved)
+ return false, new(apistatus.ObjectAlreadyRemoved)
}
- return exists, locked, nil
+ return exists, nil
}
diff --git a/pkg/local_object_storage/engine/exists_test.go b/pkg/local_object_storage/engine/exists_test.go
deleted file mode 100644
index 9b3c0833f..000000000
--- a/pkg/local_object_storage/engine/exists_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package engine
-
-import (
- "context"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/stretchr/testify/require"
-)
-
-func BenchmarkExists(b *testing.B) {
- b.Run("2 shards", func(b *testing.B) {
- benchmarkExists(b, 2)
- })
- b.Run("4 shards", func(b *testing.B) {
- benchmarkExists(b, 4)
- })
- b.Run("8 shards", func(b *testing.B) {
- benchmarkExists(b, 8)
- })
-}
-
-func benchmarkExists(b *testing.B, shardNum int) {
- e := testNewEngine(b).setShardsNum(b, shardNum).prepare(b).engine
- defer func() { require.NoError(b, e.Close(context.Background())) }()
-
- addr := oidtest.Address()
- for range 100 {
- obj := testutil.GenerateObjectWithCID(cidtest.ID())
- err := Put(context.Background(), e, obj, false)
- if err != nil {
- b.Fatal(err)
- }
- }
-
- b.ReportAllocs()
- b.ResetTimer()
- for range b.N {
- var shPrm shard.ExistsPrm
- shPrm.Address = addr
- shPrm.ECParentAddress = oid.Address{}
- ok, _, err := e.exists(context.Background(), shPrm)
- if err != nil || ok {
- b.Fatalf("%t %v", ok, err)
- }
- }
-}
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index 0694c53f3..f77c44226 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -55,7 +56,6 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
- defer elapsed("Get", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
res, err = e.get(ctx, prm)
@@ -66,6 +66,10 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
}
func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
+ if e.metrics != nil {
+ defer elapsed("Get", e.metrics.AddMethodDuration)()
+ }
+
errNotFound := new(apistatus.ObjectNotFound)
var shPrm shard.GetPrm
@@ -78,18 +82,12 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
Engine: e,
}
- if err := it.tryGetWithMeta(ctx); err != nil {
- return GetRes{}, err
- }
+ it.tryGetWithMeta(ctx)
if it.SplitInfo != nil {
return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
}
- if it.ECInfo != nil {
- return GetRes{}, logicerr.Wrap(objectSDK.NewECInfoError(it.ECInfo))
- }
-
if it.ObjectExpired {
return GetRes{}, errNotFound
}
@@ -99,18 +97,17 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
return GetRes{}, it.OutError
}
- if err := it.tryGetFromBlobstore(ctx); err != nil {
- return GetRes{}, err
- }
+ it.tryGetFromBlobstore(ctx)
if it.Object == nil {
return GetRes{}, it.OutError
}
if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
- e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
+ e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
zap.Stringer("shard_id", it.ShardWithMeta.ID()),
- zap.Error(it.MetaError),
- zap.Stringer("address", prm.addr))
+ zap.String("error", it.MetaError.Error()),
+ zap.Stringer("address", prm.addr),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
@@ -122,7 +119,6 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
type getShardIterator struct {
Object *objectSDK.Object
SplitInfo *objectSDK.SplitInfo
- ECInfo *objectSDK.ECInfo
OutError error
ShardWithMeta hashedShard
MetaError error
@@ -134,11 +130,10 @@ type getShardIterator struct {
Engine *StorageEngine
splitInfoErr *objectSDK.SplitInfoError
- ecInfoErr *objectSDK.ECInfoError
}
-func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error {
- return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+func (i *getShardIterator) tryGetWithMeta(ctx context.Context) {
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
noMeta := sh.GetMode().NoMetabase()
i.ShardPrm.SetIgnoreMeta(noMeta)
@@ -169,14 +164,6 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error {
// stop iterating over shards if SplitInfo structure is complete
return withLink && withLast
- case errors.As(err, &i.ecInfoErr):
- if i.ECInfo == nil {
- i.ECInfo = objectSDK.NewECInfo()
- }
-
- util.MergeECInfo(i.ecInfoErr.ECInfo(), i.ECInfo)
- // stop iterating over shards if ECInfo structure is complete
- return len(i.ECInfo.Chunks) == int(i.ECInfo.Chunks[0].Total)
case client.IsErrObjectAlreadyRemoved(err):
i.OutError = err
return true // stop, return it back
@@ -185,19 +172,19 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error {
i.ObjectExpired = true
return true
default:
- i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
+ i.Engine.reportShardError(sh, "could not get object from shard", err)
return false
}
})
}
-func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error {
+func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) {
// If the object is not found but is present in metabase,
// try to fetch it from blobstor directly. If it is found in any
// blobstor, increase the error counter for the shard which contains the meta.
i.ShardPrm.SetIgnoreMeta(true)
- return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
if sh.GetMode().NoMetabase() {
// Already visited.
return false
diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go
index d436dd411..ba5e7cc1d 100644
--- a/pkg/local_object_storage/engine/head.go
+++ b/pkg/local_object_storage/engine/head.go
@@ -12,7 +12,6 @@ import (
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
)
// HeadPrm groups the parameters of Head operation.
@@ -68,22 +67,24 @@ func (e *StorageEngine) Head(ctx context.Context, prm HeadPrm) (res HeadRes, err
func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.head")
defer span.End()
- defer elapsed("Head", e.metrics.AddMethodDuration)()
+
+ if e.metrics != nil {
+ defer elapsed("Head", e.metrics.AddMethodDuration)()
+ }
var (
- head *objectSDK.Object
- siErr *objectSDK.SplitInfoError
- outSI *objectSDK.SplitInfo
- eiErr *objectSDK.ECInfoError
- outEI *objectSDK.ECInfo
+ head *objectSDK.Object
+ siErr *objectSDK.SplitInfoError
+ outSI *objectSDK.SplitInfo
+
outError error = new(apistatus.ObjectNotFound)
- shPrm shard.HeadPrm
)
+
+ var shPrm shard.HeadPrm
shPrm.SetAddress(prm.addr)
shPrm.SetRaw(prm.raw)
- if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
- shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold
+ e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Head(ctx, shPrm)
if err != nil {
switch {
@@ -93,50 +94,50 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error)
if outSI == nil {
outSI = objectSDK.NewSplitInfo()
}
+
util.MergeSplitInfo(siErr.SplitInfo(), outSI)
+
_, withLink := outSI.Link()
_, withLast := outSI.LastPart()
+
// stop iterating over shards if SplitInfo structure is complete
if withLink && withLast {
return true
}
+
return false
- case errors.As(err, &eiErr):
- if outEI == nil {
- outEI = objectSDK.NewECInfo()
- }
- util.MergeECInfo(eiErr.ECInfo(), outEI)
- // stop iterating over shards if ECInfo structure is complete
- return len(outEI.Chunks) == int(outEI.Chunks[0].Total)
case client.IsErrObjectAlreadyRemoved(err):
outError = err
+
return true // stop, return it back
case shard.IsErrObjectExpired(err):
// object is found but should not
// be returned
outError = new(apistatus.ObjectNotFound)
+
return true
default:
- e.reportShardError(ctx, sh, "could not head object from shard", err, zap.Stringer("address", prm.addr))
+ e.reportShardError(sh, "could not head object from shard", err)
return false
}
}
- head = res.Object()
- return true
- }); err != nil {
- return HeadRes{}, err
- }
- if head != nil {
- return HeadRes{head: head}, nil
- }
+ head = res.Object()
+
+ return true
+ })
+
if outSI != nil {
return HeadRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(outSI))
}
- if outEI != nil {
- return HeadRes{}, logicerr.Wrap(objectSDK.NewECInfoError(outEI))
+
+ if head == nil {
+ return HeadRes{}, outError
}
- return HeadRes{}, outError
+
+ return HeadRes{
+ head: head,
+ }, nil
}
// Head reads object header from local storage by provided address.
@@ -151,3 +152,18 @@ func Head(ctx context.Context, storage *StorageEngine, addr oid.Address) (*objec
return res.Header(), nil
}
+
+// HeadRaw reads object header from local storage by provided address and raw
+// flag.
+func HeadRaw(ctx context.Context, storage *StorageEngine, addr oid.Address, raw bool) (*objectSDK.Object, error) {
+ var headPrm HeadPrm
+ headPrm.WithAddress(addr)
+ headPrm.WithRaw(raw)
+
+ res, err := storage.Head(ctx, headPrm)
+ if err != nil {
+ return nil, err
+ }
+
+ return res.Header(), nil
+}
diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go
index f9db81f16..5c123d617 100644
--- a/pkg/local_object_storage/engine/head_test.go
+++ b/pkg/local_object_storage/engine/head_test.go
@@ -39,11 +39,11 @@ func TestHeadRaw(t *testing.T) {
link.SetSplitID(splitID)
t.Run("virtual object split in different shards", func(t *testing.T) {
- te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
- e := te.engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ s1 := testNewShard(t, 1)
+ s2 := testNewShard(t, 2)
- s1, s2 := te.shards[0], te.shards[1]
+ e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
+ defer e.Close(context.Background())
var putPrmLeft shard.PutPrm
putPrmLeft.SetObject(child)
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index e5f7072e2..293746f70 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -7,10 +7,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
@@ -26,6 +26,9 @@ type InhumePrm struct {
forceRemoval bool
}
+// InhumeRes encapsulates results of inhume operation.
+type InhumeRes struct{}
+
// WithTarget sets a list of objects that should be inhumed and tombstone address
// as the reason for inhume operation.
//
@@ -63,226 +66,126 @@ var errInhumeFailure = errors.New("inhume operation failed")
// with that object) if WithForceRemoval option has been provided.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error {
+func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume")
defer span.End()
- defer elapsed("Inhume", e.metrics.AddMethodDuration)()
- return e.execIfNotBlocked(func() error {
- return e.inhume(ctx, prm)
- })
-}
-
-func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error {
- addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval)
- if err != nil {
+ err = e.execIfNotBlocked(func() error {
+ res, err = e.inhume(ctx, prm)
return err
- }
-
- var shPrm shard.InhumePrm
- if prm.forceRemoval {
- shPrm.ForceRemoval()
- }
-
- for shardID, addrs := range addrsPerShard {
- if prm.tombstone != nil {
- shPrm.SetTarget(*prm.tombstone, addrs...)
- } else {
- shPrm.MarkAsGarbage(addrs...)
- }
-
- sh, exists := e.shards[shardID]
- if !exists {
- e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard,
- zap.Error(errors.New("this shard was expected to exist")),
- zap.String("shard_id", shardID),
- )
- return errInhumeFailure
- }
-
- if _, err := sh.Inhume(ctx, shPrm); err != nil {
- e.reportInhumeError(ctx, err, sh)
- return err
- }
- }
-
- return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm)
-}
-
-func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) {
- if err == nil {
- return
- }
-
- var errLocked *apistatus.ObjectLocked
- switch {
- case errors.As(err, &errLocked):
- case errors.Is(err, shard.ErrLockObjectRemoval):
- case errors.Is(err, shard.ErrReadOnlyMode):
- case errors.Is(err, shard.ErrDegradedMode):
- default:
- e.reportShardError(ctx, hs, "couldn't inhume object in shard", err)
- }
-}
-
-// inhumeNotFoundObjects removes object which are not found on any shard.
-//
-// Besides an object not being found on any shard, it is also important to
-// remove it anyway in order to populate the metabase indexes because they are
-// responsible for the correct object status, i.e., the status will be `object
-// not found` without the indexes, the status will be `object is already
-// removed` with the indexes.
-//
-// It is suggested to evenly remove those objects on each shard with the batch
-// size equal to 1 + floor(number of objects / number of shards).
-func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error {
- if len(addrs) == 0 {
- return nil
- }
-
- var shPrm shard.InhumePrm
- if prm.forceRemoval {
- shPrm.ForceRemoval()
- }
-
- numObjectsPerShard := 1 + len(addrs)/len(e.shards)
-
- var inhumeErr error
- itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
- numObjects := min(numObjectsPerShard, len(addrs))
-
- if numObjects == 0 {
- return true
- }
-
- if prm.tombstone != nil {
- shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...)
- } else {
- shPrm.MarkAsGarbage(addrs[:numObjects]...)
- }
- addrs = addrs[numObjects:]
-
- _, inhumeErr = hs.Inhume(ctx, shPrm)
- e.reportInhumeError(ctx, inhumeErr, hs)
- return inhumeErr != nil
})
- if inhumeErr != nil {
- return inhumeErr
- }
- return itErr
-}
-
-// groupObjectsByShard groups objects based on the shard(s) they are stored on.
-//
-// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
-// the objects are locked.
-//
-// Returns two sets of objects: found objects which are grouped per shard and
-// not found object. Not found objects are objects which are not found on any
-// shard. This can happen if a node is a container node but doesn't participate
-// in a replica group of the object.
-func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) {
- groups = make(map[string][]oid.Address)
-
- var ids []string
- for _, addr := range addrs {
- ids, err = e.findShards(ctx, addr, checkLocked)
- if err != nil {
- return
- }
-
- if len(ids) == 0 {
- notFoundObjects = append(notFoundObjects, addr)
- continue
- }
-
- for _, id := range ids {
- groups[id] = append(groups[id], addr)
- }
- }
return
}
-// findShards determines the shard(s) where the object is stored.
-//
-// If the object is a root object, multiple shards will be returned.
-//
-// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
-// the objects are locked.
-func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkLocked bool) ([]string, error) {
- var (
- ids []string
- retErr error
+func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
+ if e.metrics != nil {
+ defer elapsed("Inhume", e.metrics.AddMethodDuration)()
+ }
- prm shard.ExistsPrm
+ var shPrm shard.InhumePrm
+ if prm.forceRemoval {
+ shPrm.ForceRemoval()
+ }
- siErr *objectSDK.SplitInfoError
- ecErr *objectSDK.ECInfoError
-
- isRootObject bool
- objectExists bool
- )
-
- if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
- objectExists = false
-
- prm.Address = addr
- switch res, err := sh.Exists(ctx, prm); {
- case client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err):
- // NOTE(@a-savchuk): there were some considerations that we can stop
- // immediately if the object is already removed or expired. However,
- // the previous method behavior was:
- // - keep iterating if it's a root object and already removed,
- // - stop iterating if it's not a root object and removed.
- //
- // Since my task was only improving method speed, let's keep the
- // previous method behavior. Continue if it's a root object.
- return !isRootObject
- case errors.As(err, &siErr) || errors.As(err, &ecErr):
- isRootObject = true
- objectExists = true
- case err != nil:
- e.reportShardError(
- ctx, sh, "couldn't check for presence in shard",
- err, zap.Stringer("address", addr),
- )
- case res.Exists():
- objectExists = true
- default:
- }
-
- if checkLocked {
- if isLocked, err := sh.IsLocked(ctx, addr); err != nil {
- e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck,
+ for i := range prm.addrs {
+ if !prm.forceRemoval {
+ locked, err := e.IsLocked(ctx, prm.addrs[i])
+ if err != nil {
+ e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
zap.Error(err),
- zap.Stringer("address", addr),
- )
- } else if isLocked {
- retErr = new(apistatus.ObjectLocked)
- return true
+ zap.Stringer("addr", prm.addrs[i]),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ } else if locked {
+ return InhumeRes{}, new(apistatus.ObjectLocked)
}
}
- // This exit point must come after checking if the object is locked,
- // since the locked index may be populated even if the object doesn't
- // exist.
- if !objectExists {
- return
+ if prm.tombstone != nil {
+ shPrm.SetTarget(*prm.tombstone, prm.addrs[i])
+ } else {
+ shPrm.MarkAsGarbage(prm.addrs[i])
}
- ids = append(ids, sh.ID().String())
-
- // Continue if it's a root object.
- return !isRootObject
- }); err != nil {
- return nil, err
+ ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, true)
+ if err != nil {
+ return InhumeRes{}, err
+ }
+ if !ok {
+ ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, false)
+ if err != nil {
+ return InhumeRes{}, err
+ } else if !ok {
+ return InhumeRes{}, errInhumeFailure
+ }
+ }
}
- if retErr != nil {
- return nil, retErr
- }
- return ids, nil
+ return InhumeRes{}, nil
+}
+
+// Returns ok if object was inhumed during this invocation or before.
+func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) {
+ root := false
+ var existPrm shard.ExistsPrm
+ var retErr error
+ var ok bool
+
+ e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
+ defer func() {
+ // if object is root we continue since information about it
+ // can be presented in other shards
+ if checkExists && root {
+ stop = false
+ }
+ }()
+
+ if checkExists {
+ existPrm.SetAddress(addr)
+ exRes, err := sh.Exists(ctx, existPrm)
+ if err != nil {
+ if client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err) {
+ // inhumed once - no need to be inhumed again
+ ok = true
+ return true
+ }
+
+ var siErr *objectSDK.SplitInfoError
+ if !errors.As(err, &siErr) {
+ e.reportShardError(sh, "could not check for presents in shard", err)
+ return
+ }
+
+ root = true
+ } else if !exRes.Exists() {
+ return
+ }
+ }
+
+ _, err := sh.Inhume(ctx, prm)
+ if err != nil {
+ var errLocked *apistatus.ObjectLocked
+ switch {
+ case errors.As(err, &errLocked):
+ retErr = new(apistatus.ObjectLocked)
+ return true
+ case errors.Is(err, shard.ErrLockObjectRemoval):
+ retErr = meta.ErrLockObjectRemoval
+ return true
+ case errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, shard.ErrDegradedMode):
+ retErr = err
+ return true
+ }
+
+ e.reportShardError(sh, "could not inhume object in shard", err)
+ return false
+ }
+
+ ok = true
+ return true
+ })
+
+ return ok, retErr
}
// IsLocked checks whether an object is locked according to StorageEngine's state.
@@ -297,18 +200,17 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e
var err error
var outErr error
- if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) {
- locked, err = h.IsLocked(ctx, addr)
+ e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
+ locked, err = h.Shard.IsLocked(ctx, addr)
if err != nil {
- e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr))
+ e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("addr", addr),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
outErr = err
return false
}
return locked
- }); err != nil {
- return false, err
- }
+ })
if locked {
return locked, nil
@@ -317,252 +219,43 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e
return locked, outErr
}
-// GetLocks return lock id's if object is locked according to StorageEngine's state.
-func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocks",
- trace.WithAttributes(
- attribute.String("address", addr.EncodeToString()),
- ))
- defer span.End()
-
- var allLocks []oid.ID
- var outErr error
-
- if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) {
- locks, err := h.GetLocks(ctx, addr)
- if err != nil {
- e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr))
- outErr = err
- }
- allLocks = append(allLocks, locks...)
- return false
- }); err != nil {
- return nil, err
- }
- if len(allLocks) > 0 {
- return allLocks, nil
- }
- return allLocks, outErr
-}
-
func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) {
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
sh.HandleExpiredTombstones(ctx, addrs)
select {
case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err()))
return true
default:
return false
}
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err))
- }
+ })
}
func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
sh.HandleExpiredLocks(ctx, epoch, lockers)
select {
case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
+ e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
return true
default:
return false
}
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err))
- }
+ })
}
func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) {
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
- sh.HandleDeletedLocks(ctx, lockers)
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+ sh.HandleDeletedLocks(lockers)
select {
case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
+ e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
return true
default:
return false
}
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err))
- }
-}
-
-func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) {
- if len(ids) == 0 {
- return
- }
- idMap, err := e.selectNonExistentIDs(ctx, ids)
- if err != nil {
- return
- }
- if len(idMap) == 0 {
- return
- }
- var failed bool
- var prm shard.ContainerSizePrm
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
- select {
- case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
- failed = true
- return true
- default:
- }
-
- var drop []cid.ID
- for id := range idMap {
- prm.SetContainerID(id)
- s, err := sh.ContainerSize(ctx, prm)
- if err != nil {
- e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
- failed = true
- return true
- }
- if s.Size() > 0 {
- drop = append(drop, id)
- }
- }
- for _, id := range drop {
- delete(idMap, id)
- }
-
- return len(idMap) == 0
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err))
- return
- }
- if failed || len(idMap) == 0 {
- return
- }
-
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
- select {
- case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
- failed = true
- return true
- default:
- }
-
- for id := range idMap {
- if err := sh.DeleteContainerSize(ctx, id); err != nil {
- e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
- failed = true
- return true
- }
- }
-
- return false
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err))
- return
- }
- if failed {
- return
- }
- for id := range idMap {
- e.metrics.DeleteContainerSize(id.EncodeToString())
- }
-}
-
-func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []cid.ID) {
- if len(ids) == 0 {
- return
- }
- idMap, err := e.selectNonExistentIDs(ctx, ids)
- if err != nil {
- return
- }
- if len(idMap) == 0 {
- return
- }
- var failed bool
- var prm shard.ContainerCountPrm
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
- select {
- case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
- failed = true
- return true
- default:
- }
-
- var drop []cid.ID
- for id := range idMap {
- prm.ContainerID = id
- s, err := sh.ContainerCount(ctx, prm)
- if err != nil {
- e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
- failed = true
- return true
- }
- if s.User > 0 || s.Logic > 0 || s.Phy > 0 {
- drop = append(drop, id)
- }
- }
- for _, id := range drop {
- delete(idMap, id)
- }
-
- return len(idMap) == 0
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err))
- return
- }
- if failed || len(idMap) == 0 {
- return
- }
-
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool {
- select {
- case <-ctx.Done():
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
- failed = true
- return true
- default:
- }
-
- for id := range idMap {
- if err := sh.DeleteContainerCount(ctx, id); err != nil {
- e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
- failed = true
- return true
- }
- }
-
- return false
- }); err != nil {
- e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err))
- return
- }
- if failed {
- return
- }
- for id := range idMap {
- e.metrics.DeleteContainerCount(id.EncodeToString())
- }
-}
-
-func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID) (map[cid.ID]struct{}, error) {
- cs := e.containerSource.Load()
-
- idMap := make(map[cid.ID]struct{})
- for _, id := range ids {
- isAvailable, err := cs.IsContainerAvailable(ctx, id)
- if err != nil {
- e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
- return nil, err
- }
- if isAvailable {
- continue
- }
- idMap[id] = struct{}{}
- }
- return idMap, nil
+ })
}
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index 0e268cd23..8fff6280c 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -2,24 +2,14 @@ package engine
import (
"context"
- "fmt"
- "strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
"github.com/stretchr/testify/require"
- "golang.org/x/sync/errgroup"
)
func TestStorageEngine_Inhume(t *testing.T) {
@@ -46,32 +36,29 @@ func TestStorageEngine_Inhume(t *testing.T) {
link.SetSplitID(splitID)
t.Run("delete small object", func(t *testing.T) {
- t.Parallel()
- e := testNewEngine(t).setShardsNum(t, 1).prepare(t).engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ e := testNewEngine(t).setShardsNum(t, 1).engine
+ defer e.Close(context.Background())
- err := Put(context.Background(), e, parent, false)
+ err := Put(context.Background(), e, parent)
require.NoError(t, err)
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(context.Background(), e, cnr, false, fs)
+ addrs, err := Select(context.Background(), e, cnr, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
t.Run("delete big object", func(t *testing.T) {
- t.Parallel()
+ s1 := testNewShard(t, 1)
+ s2 := testNewShard(t, 2)
- te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
- e := te.engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
-
- s1, s2 := te.shards[0], te.shards[1]
+ e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
+ defer e.Close(context.Background())
var putChild shard.PutPrm
putChild.SetObject(child)
@@ -86,257 +73,11 @@ func TestStorageEngine_Inhume(t *testing.T) {
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(context.Background(), e, cnr, false, fs)
+ addrs, err := Select(context.Background(), e, cnr, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
}
-
-func TestStorageEngine_ECInhume(t *testing.T) {
- parentObjectAddress := oidtest.Address()
- containerID := parentObjectAddress.Container()
-
- chunkObject0 := testutil.GenerateObjectWithCID(containerID)
- chunkObject0.SetECHeader(objectSDK.NewECHeader(
- objectSDK.ECParentInfo{
- ID: parentObjectAddress.Object(),
- }, 0, 4, []byte{}, 0))
-
- chunkObject1 := testutil.GenerateObjectWithCID(containerID)
- chunkObject1.SetECHeader(objectSDK.NewECHeader(
- objectSDK.ECParentInfo{
- ID: parentObjectAddress.Object(),
- }, 1, 4, []byte{}, 0))
-
- tombstone := objectSDK.NewTombstone()
- tombstone.SetMembers([]oid.ID{parentObjectAddress.Object()})
- payload, err := tombstone.Marshal()
- require.NoError(t, err)
- tombstoneObject := testutil.GenerateObjectWithCID(containerID)
- tombstoneObject.SetType(objectSDK.TypeTombstone)
- tombstoneObject.SetPayload(payload)
- tombstoneObjectAddress := object.AddressOf(tombstoneObject)
-
- e := testNewEngine(t).setShardsNum(t, 5).prepare(t).engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
-
- require.NoError(t, Put(context.Background(), e, chunkObject0, false))
-
- require.NoError(t, Put(context.Background(), e, tombstoneObject, false))
-
- var inhumePrm InhumePrm
- inhumePrm.WithTarget(tombstoneObjectAddress, parentObjectAddress)
- err = e.Inhume(context.Background(), inhumePrm)
- require.NoError(t, err)
-
- var alreadyRemoved *apistatus.ObjectAlreadyRemoved
-
- require.ErrorAs(t, Put(context.Background(), e, chunkObject0, false), &alreadyRemoved)
-
- require.ErrorAs(t, Put(context.Background(), e, chunkObject1, false), &alreadyRemoved)
-}
-
-func TestInhumeExpiredRegularObject(t *testing.T) {
- t.Parallel()
-
- const currEpoch = 42
- const objectExpiresAfter = currEpoch - 1
-
- engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
- return []shard.Option{
- shard.WithDisabledGC(),
- shard.WithMetaBaseOptions(append(
- testGetDefaultMetabaseOptions(t),
- meta.WithEpochState(epochState{currEpoch}),
- )...),
- }
- }).prepare(t).engine
-
- cnr := cidtest.ID()
-
- generateAndPutObject := func() *objectSDK.Object {
- obj := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter))
-
- var putPrm PutPrm
- putPrm.Object = obj
- require.NoError(t, engine.Put(context.Background(), putPrm))
- return obj
- }
-
- t.Run("inhume with tombstone", func(t *testing.T) {
- obj := generateAndPutObject()
- ts := oidtest.Address()
- ts.SetContainer(cnr)
-
- var prm InhumePrm
- prm.WithTarget(ts, object.AddressOf(obj))
- err := engine.Inhume(context.Background(), prm)
- require.NoError(t, err)
- })
-
- t.Run("inhume without tombstone", func(t *testing.T) {
- obj := generateAndPutObject()
-
- var prm InhumePrm
- prm.MarkAsGarbage(object.AddressOf(obj))
- err := engine.Inhume(context.Background(), prm)
- require.NoError(t, err)
- })
-}
-
-func BenchmarkInhumeMultipart(b *testing.B) {
- // The benchmark result insignificantly depends on the number of shards,
- // so do not use it as a benchmark parameter, just set it big enough.
- numShards := 100
-
- for numObjects := 1; numObjects <= 10000; numObjects *= 10 {
- b.Run(
- fmt.Sprintf("objects=%d", numObjects),
- func(b *testing.B) {
- benchmarkInhumeMultipart(b, numShards, numObjects)
- },
- )
- }
-}
-
-func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) {
- b.StopTimer()
-
- engine := testNewEngine(b).
- setShardsNum(b, numShards).prepare(b).engine
- defer func() { require.NoError(b, engine.Close(context.Background())) }()
-
- cnt := cidtest.ID()
- eg := errgroup.Group{}
-
- for range b.N {
- addrs := make([]oid.Address, numObjects)
-
- for i := range numObjects {
- prm := PutPrm{}
-
- prm.Object = objecttest.Object().Parent()
- prm.Object.SetContainerID(cnt)
- prm.Object.SetType(objectSDK.TypeRegular)
-
- addrs[i] = object.AddressOf(prm.Object)
-
- eg.Go(func() error {
- return engine.Put(context.Background(), prm)
- })
- }
- require.NoError(b, eg.Wait())
-
- ts := oidtest.Address()
- ts.SetContainer(cnt)
-
- prm := InhumePrm{}
- prm.WithTarget(ts, addrs...)
-
- b.StartTimer()
- err := engine.Inhume(context.Background(), prm)
- require.NoError(b, err)
- b.StopTimer()
- }
-}
-
-func TestInhumeIfObjectDoesntExist(t *testing.T) {
- const numShards = 4
-
- engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine
- t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) })
-
- t.Run("inhume without tombstone", func(t *testing.T) {
- testInhumeIfObjectDoesntExist(t, engine, false, false)
- })
- t.Run("inhume with tombstone", func(t *testing.T) {
- testInhumeIfObjectDoesntExist(t, engine, true, false)
- })
- t.Run("force inhume", func(t *testing.T) {
- testInhumeIfObjectDoesntExist(t, engine, false, true)
- })
-
- t.Run("object is locked", func(t *testing.T) {
- t.Run("inhume without tombstone", func(t *testing.T) {
- testInhumeLockedIfObjectDoesntExist(t, engine, false, false)
- })
- t.Run("inhume with tombstone", func(t *testing.T) {
- testInhumeLockedIfObjectDoesntExist(t, engine, true, false)
- })
- t.Run("force inhume", func(t *testing.T) {
- testInhumeLockedIfObjectDoesntExist(t, engine, false, true)
- })
- })
-}
-
-func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) {
- t.Parallel()
-
- object := oidtest.Address()
- require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce))
-
- err := testHeadObject(e, object)
- if withTombstone {
- require.True(t, client.IsErrObjectAlreadyRemoved(err))
- } else {
- require.True(t, client.IsErrObjectNotFound(err))
- }
-}
-
-func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) {
- t.Parallel()
-
- object := oidtest.Address()
- require.NoError(t, testLockObject(e, object))
-
- err := testInhumeObject(t, e, object, withTombstone, withForce)
- if !withForce {
- var errLocked *apistatus.ObjectLocked
- require.ErrorAs(t, err, &errLocked)
- return
- }
- require.NoError(t, err)
-
- err = testHeadObject(e, object)
- if withTombstone {
- require.True(t, client.IsErrObjectAlreadyRemoved(err))
- } else {
- require.True(t, client.IsErrObjectNotFound(err))
- }
-}
-
-func testLockObject(e *StorageEngine, obj oid.Address) error {
- return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()})
-}
-
-func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error {
- tombstone := oidtest.Address()
- tombstone.SetContainer(obj.Container())
-
- // Due to the tests design it is possible to set both the options,
- // however removal with tombstone and force removal are exclusive.
- require.False(t, withTombstone && withForce)
-
- var inhumePrm InhumePrm
- if withTombstone {
- inhumePrm.WithTarget(tombstone, obj)
- } else {
- inhumePrm.MarkAsGarbage(obj)
- }
- if withForce {
- inhumePrm.WithForceRemoval()
- }
- return e.Inhume(context.Background(), inhumePrm)
-}
-
-func testHeadObject(e *StorageEngine, obj oid.Address) error {
- var headPrm HeadPrm
- headPrm.WithAddress(obj)
-
- _, err := e.Head(context.Background(), headPrm)
- return err
-}
diff --git a/pkg/local_object_storage/engine/list.go b/pkg/local_object_storage/engine/list.go
index 073248862..f9229a2b1 100644
--- a/pkg/local_object_storage/engine/list.go
+++ b/pkg/local_object_storage/engine/list.go
@@ -7,7 +7,6 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
)
// ErrEndOfListing is returned from an object listing with cursor
@@ -69,12 +68,12 @@ func (p *ListWithCursorPrm) WithCursor(cursor *Cursor) {
// ListWithCursorRes contains values returned from ListWithCursor operation.
type ListWithCursorRes struct {
- addrList []objectcore.Info
+ addrList []objectcore.AddressWithType
cursor *Cursor
}
// AddressList returns addresses selected by ListWithCursor operation.
-func (l ListWithCursorRes) AddressList() []objectcore.Info {
+func (l ListWithCursorRes) AddressList() []objectcore.AddressWithType {
return l.addrList
}
@@ -99,11 +98,7 @@ func (l ListWithCursorRes) Cursor() *Cursor {
// Returns ErrEndOfListing if there are no more objects to return or count
// parameter set to zero.
func (e *StorageEngine) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (ListWithCursorRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.ListWithCursor")
- defer span.End()
- defer elapsed("ListWithCursor", e.metrics.AddMethodDuration)()
-
- result := make([]objectcore.Info, 0, prm.count)
+ result := make([]objectcore.AddressWithType, 0, prm.count)
// Set initial cursors
cursor := prm.cursor
@@ -139,7 +134,10 @@ func (e *StorageEngine) ListWithCursor(ctx context.Context, prm ListWithCursorPr
continue
}
- count := min(prm.count-uint32(len(result)), batchSize)
+ count := prm.count - uint32(len(result))
+ if count > batchSize {
+ count = batchSize
+ }
var shardPrm shard.ListWithCursorPrm
shardPrm.WithCount(count)
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index 6cfa546f8..eef25d209 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -3,6 +3,7 @@ package engine
import (
"context"
"path/filepath"
+ "sort"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -17,6 +18,13 @@ import (
"github.com/stretchr/testify/require"
)
+func sortAddresses(addrWithType []object.AddressWithType) []object.AddressWithType {
+ sort.Slice(addrWithType, func(i, j int) bool {
+ return addrWithType[i].Address.EncodeToString() < addrWithType[j].Address.EncodeToString()
+ })
+ return addrWithType
+}
+
func TestListWithCursor(t *testing.T) {
t.Parallel()
@@ -57,32 +65,39 @@ func TestListWithCursor(t *testing.T) {
t.Parallel()
e := testNewEngine(t).setShardsNumOpts(t, tt.shardNum, func(id int) []shard.Option {
return []shard.Option{
- shard.WithLogger(test.NewLogger(t)),
+ shard.WithLogger(test.NewLogger(t, true)),
shard.WithBlobStorOptions(
blobstor.WithStorages(
- newStorages(t, t.TempDir(), 1<<20))),
+ newStorages(t.TempDir(), 1<<20))),
shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
shard.WithMetaBaseOptions(
meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
- meta.WithPermissions(0o700),
+ meta.WithPermissions(0700),
meta.WithEpochState(epochState{}),
- ),
- }
- }).prepare(t).engine
- defer func() {
- require.NoError(t, e.Close(context.Background()))
- }()
+ )}
+ }).engine
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
- expected := make([]object.Info, 0, tt.objectNum)
- got := make([]object.Info, 0, tt.objectNum)
+ t.Cleanup(func() {
+ e.Close(context.Background())
+ })
- for range tt.objectNum {
+ expected := make([]object.AddressWithType, 0, tt.objectNum)
+ got := make([]object.AddressWithType, 0, tt.objectNum)
+
+ for i := 0; i < tt.objectNum; i++ {
containerID := cidtest.ID()
obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'})
- err := e.Put(context.Background(), PutPrm{Object: obj})
+
+ var prm PutPrm
+ prm.WithObject(obj)
+
+ err := e.Put(context.Background(), prm)
require.NoError(t, err)
- expected = append(expected, object.Info{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
+ expected = append(expected, object.AddressWithType{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
}
+ expected = sortAddresses(expected)
var prm ListWithCursorPrm
prm.count = tt.batchSize
@@ -97,7 +112,8 @@ func TestListWithCursor(t *testing.T) {
prm.cursor = res.Cursor()
}
- require.ElementsMatch(t, expected, got)
+ got = sortAddresses(got)
+ require.Equal(t, expected, got)
})
}
}
diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go
index 3b0cf74f9..5ad603421 100644
--- a/pkg/local_object_storage/engine/lock.go
+++ b/pkg/local_object_storage/engine/lock.go
@@ -13,7 +13,6 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
)
var errLockFailed = errors.New("lock operation failed")
@@ -32,7 +31,6 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l
attribute.Int("locked_count", len(locked)),
))
defer span.End()
- defer elapsed("Lock", e.metrics.AddMethodDuration)()
return e.execIfNotBlocked(func() error {
return e.lock(ctx, idCnr, locker, locked)
@@ -41,19 +39,11 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l
func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
for i := range locked {
- st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true)
- if err != nil {
- return err
- }
- switch st {
+ switch e.lockSingle(ctx, idCnr, locker, locked[i], true) {
case 1:
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
case 0:
- st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false)
- if err != nil {
- return err
- }
- switch st {
+ switch e.lockSingle(ctx, idCnr, locker, locked[i], false) {
case 1:
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
case 0:
@@ -69,13 +59,15 @@ func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l
// - 0: fail
// - 1: locking irregular object
// - 2: ok
-func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) {
+func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
// code is pretty similar to inhumeAddr, maybe unify?
root := false
+
var addrLocked oid.Address
addrLocked.SetContainer(idCnr)
addrLocked.SetObject(locked)
- retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) {
+
+ e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) {
defer func() {
// if object is root we continue since information about it
// can be presented in other shards
@@ -86,33 +78,19 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
if checkExists {
var existsPrm shard.ExistsPrm
- existsPrm.Address = addrLocked
+ existsPrm.SetAddress(addrLocked)
+
exRes, err := sh.Exists(ctx, existsPrm)
if err != nil {
var siErr *objectSDK.SplitInfoError
- var eiErr *objectSDK.ECInfoError
- if errors.As(err, &eiErr) {
- eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr)
- if !ok {
- return false
- }
-
- err = sh.Lock(ctx, idCnr, locker, eclocked)
- if err != nil {
- e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
- zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
- return false
- }
- root = true
- return false
- } else if !errors.As(err, &siErr) {
+ if !errors.As(err, &siErr) {
if shard.IsErrObjectExpired(err) {
// object is already expired =>
// do not lock it
return true
}
- e.reportShardError(ctx, sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr),
- zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
+
+ e.reportShardError(sh, "could not check locked object for presence in shard", err)
return
}
@@ -124,33 +102,21 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked})
if err != nil {
- e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
- zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
+ e.reportShardError(sh, "could not lock object in shard", err)
var errIrregular *apistatus.LockNonRegularObject
if errors.As(err, &errIrregular) {
status = 1
return true
}
+
return false
}
+
status = 2
+
return true
})
+
return
}
-
-func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) {
- eclocked := []oid.ID{locked}
- for _, chunk := range eiErr.ECInfo().Chunks {
- var objID oid.ID
- err := objID.ReadFromV2(chunk.ID)
- if err != nil {
- e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
- zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
- return nil, false
- }
- eclocked = append(eclocked, objID)
- }
- return eclocked, true
-}
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index b8c9d6b1d..d5c080222 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -6,12 +6,12 @@ import (
"testing"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -57,10 +57,14 @@ func TestLockUserScenario(t *testing.T) {
}),
shard.WithTombstoneSource(tss{lockerExpiresAfter}),
}
- }).
- prepare(t)
+ })
e := testEngine.engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
+
+ t.Cleanup(func() {
+ _ = e.Close(context.Background())
+ })
lockerID := oidtest.ID()
tombID := oidtest.ID()
@@ -95,7 +99,7 @@ func TestLockUserScenario(t *testing.T) {
id, _ := obj.ID()
objAddr.SetObject(id)
- err = Put(context.Background(), e, obj, false)
+ err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
@@ -103,7 +107,7 @@ func TestLockUserScenario(t *testing.T) {
locker.WriteMembers([]oid.ID{id})
objectSDK.WriteLock(lockerObj, locker)
- err = Put(context.Background(), e, lockerObj, false)
+ err = Put(context.Background(), e, lockerObj)
require.NoError(t, err)
err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id})
@@ -114,7 +118,7 @@ func TestLockUserScenario(t *testing.T) {
inhumePrm.WithTarget(tombAddr, objAddr)
var objLockedErr *apistatus.ObjectLocked
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
// 4.
@@ -122,23 +126,24 @@ func TestLockUserScenario(t *testing.T) {
tombObj.SetID(tombForLockID)
tombObj.SetAttributes(a)
- err = Put(context.Background(), e, tombObj, false)
+ err = Put(context.Background(), e, tombObj)
require.NoError(t, err)
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorIs(t, err, meta.ErrLockObjectRemoval)
// 5.
- e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1)
+ e.HandleNewEpoch(lockerExpiresAfter + 1)
+
+ // delay for GC
+ time.Sleep(time.Second)
inhumePrm.WithTarget(tombAddr, objAddr)
- require.Eventually(t, func() bool {
- err = e.Inhume(context.Background(), inhumePrm)
- return err == nil
- }, 30*time.Second, time.Second)
+ _, err = e.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
}
func TestLockExpiration(t *testing.T) {
@@ -160,10 +165,14 @@ func TestLockExpiration(t *testing.T) {
return pool
}),
}
- }).
- prepare(t)
+ })
e := testEngine.engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
+
+ t.Cleanup(func() {
+ _ = e.Close(context.Background())
+ })
const lockerExpiresAfter = 13
@@ -173,7 +182,7 @@ func TestLockExpiration(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(context.Background(), e, obj, false)
+ err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
@@ -185,7 +194,7 @@ func TestLockExpiration(t *testing.T) {
lock.SetType(objectSDK.TypeLock)
lock.SetAttributes(a)
- err = Put(context.Background(), e, lock, false)
+ err = Put(context.Background(), e, lock)
require.NoError(t, err)
id, _ := obj.ID()
@@ -195,26 +204,24 @@ func TestLockExpiration(t *testing.T) {
require.NoError(t, err)
var inhumePrm InhumePrm
- tombAddr := oidtest.Address()
- tombAddr.SetContainer(cnr)
- inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
+ inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
var objLockedErr *apistatus.ObjectLocked
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
// 3.
- e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1)
+ e.HandleNewEpoch(lockerExpiresAfter + 1)
+
+ // delay for GC processing. It can't be estimated, but making it bigger
+ // will slow down test
+ time.Sleep(time.Second)
// 4.
- tombAddr = oidtest.Address()
- tombAddr.SetContainer(cnr)
- inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
+ inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
- require.Eventually(t, func() bool {
- err = e.Inhume(context.Background(), inhumePrm)
- return err == nil
- }, 30*time.Second, time.Second)
+ _, err = e.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
}
func TestLockForceRemoval(t *testing.T) {
@@ -239,9 +246,12 @@ func TestLockForceRemoval(t *testing.T) {
}),
shard.WithDeletedLockCallback(e.processDeletedLocks),
}
- }).
- prepare(t).engine
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ }).engine
+ require.NoError(t, e.Open(context.Background()))
+ require.NoError(t, e.Init(context.Background()))
+ t.Cleanup(func() {
+ _ = e.Close(context.Background())
+ })
cnr := cidtest.ID()
var err error
@@ -249,14 +259,14 @@ func TestLockForceRemoval(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(context.Background(), e, obj, false)
+ err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
- err = Put(context.Background(), e, lock, false)
+ err = Put(context.Background(), e, lock)
require.NoError(t, err)
id, _ := obj.ID()
@@ -270,12 +280,12 @@ func TestLockForceRemoval(t *testing.T) {
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
var objLockedErr *apistatus.ObjectLocked
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
// 4.
@@ -283,64 +293,12 @@ func TestLockForceRemoval(t *testing.T) {
deletePrm.WithAddress(objectcore.AddressOf(lock))
deletePrm.WithForceRemoval()
- require.NoError(t, e.Delete(context.Background(), deletePrm))
+ _, err = e.Delete(context.Background(), deletePrm)
+ require.NoError(t, err)
// 5.
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- err = e.Inhume(context.Background(), inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
}
-
-func TestLockExpiredRegularObject(t *testing.T) {
- const currEpoch = 42
- const objectExpiresAfter = currEpoch - 1
-
- engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
- return []shard.Option{
- shard.WithDisabledGC(),
- shard.WithMetaBaseOptions(append(
- testGetDefaultMetabaseOptions(t),
- meta.WithEpochState(epochState{currEpoch}),
- )...),
- }
- }).prepare(t).engine
-
- cnr := cidtest.ID()
-
- object := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(object, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter))
-
- address := objectcore.AddressOf(object)
-
- var putPrm PutPrm
- putPrm.Object = object
- require.NoError(t, engine.Put(context.Background(), putPrm))
-
- var getPrm GetPrm
- var errNotFound *apistatus.ObjectNotFound
-
- getPrm.WithAddress(address)
- _, err := engine.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, &errNotFound)
-
- t.Run("lock expired regular object", func(t *testing.T) {
- engine.Lock(context.Background(),
- address.Container(),
- oidtest.ID(),
- []oid.ID{address.Object()},
- )
-
- res, err := engine.IsLocked(context.Background(), objectcore.AddressOf(object))
- require.NoError(t, err)
- require.True(t, res)
- })
-
- t.Run("get expired and locked regular object", func(t *testing.T) {
- getPrm.WithAddress(objectcore.AddressOf(object))
-
- res, err := engine.Get(context.Background(), getPrm)
- require.NoError(t, err)
- require.Equal(t, res.Object(), object)
- })
-}
diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go
index 963292d83..fcac2dc60 100644
--- a/pkg/local_object_storage/engine/metrics.go
+++ b/pkg/local_object_storage/engine/metrics.go
@@ -3,16 +3,27 @@ package engine
import (
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)
-type (
- MetricRegister = metrics.EngineMetrics
- GCMetrics = metrics.GCMetrics
- WriteCacheMetrics = metrics.WriteCacheMetrics
- NullBool = metrics.NullBool
-)
+type MetricRegister interface {
+ AddMethodDuration(method string, d time.Duration)
+
+ SetObjectCounter(shardID, objectType string, v uint64)
+ AddToObjectCounter(shardID, objectType string, delta int)
+
+ SetMode(shardID string, mode mode.Mode)
+
+ AddToContainerSize(cnrID string, size int64)
+ AddToPayloadCounter(shardID string, size int64)
+ IncErrorCounter(shardID string)
+ ClearErrorCounter(shardID string)
+ DeleteShardMetrics(shardID string)
+
+ WriteCache() metrics.WriteCacheMetrics
+ GC() metrics.GCMetrics
+}
func elapsed(method string, addFunc func(method string, d time.Duration)) func() {
t := time.Now()
@@ -27,10 +38,6 @@ type gcMetrics struct {
shardID string
}
-func (m *gcMetrics) SetShardID(id string) {
- m.shardID = id
-}
-
func (m *gcMetrics) AddRunDuration(d time.Duration, success bool) {
m.storage.AddRunDuration(m.shardID, d, success)
}
@@ -46,48 +53,3 @@ func (m *gcMetrics) AddExpiredObjectCollectionDuration(d time.Duration, success
func (m *gcMetrics) AddInhumedObjectCount(count uint64, objectType string) {
m.storage.AddInhumedObjectCount(m.shardID, count, objectType)
}
-
-type (
- noopMetrics struct{}
- noopWriteCacheMetrics struct{}
- noopGCMetrics struct{}
-)
-
-var (
- _ MetricRegister = noopMetrics{}
- _ WriteCacheMetrics = noopWriteCacheMetrics{}
- _ GCMetrics = noopGCMetrics{}
-)
-
-func (noopMetrics) AddMethodDuration(string, time.Duration) {}
-func (noopMetrics) SetObjectCounter(string, string, uint64) {}
-func (noopMetrics) AddToObjectCounter(string, string, int) {}
-func (noopMetrics) SetMode(string, mode.Mode) {}
-func (noopMetrics) AddToContainerSize(string, int64) {}
-func (noopMetrics) DeleteContainerSize(string) {}
-func (noopMetrics) DeleteContainerCount(string) {}
-func (noopMetrics) AddToPayloadCounter(string, int64) {}
-func (noopMetrics) IncErrorCounter(string) {}
-func (noopMetrics) ClearErrorCounter(string) {}
-func (noopMetrics) DeleteShardMetrics(string) {}
-func (noopMetrics) SetContainerObjectCounter(string, string, string, uint64) {}
-func (noopMetrics) IncContainerObjectCounter(string, string, string) {}
-func (noopMetrics) SubContainerObjectCounter(string, string, string, uint64) {}
-func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {}
-func (noopMetrics) SetRefillPercent(string, string, uint32) {}
-func (noopMetrics) SetRefillStatus(string, string, string) {}
-func (noopMetrics) SetEvacuationInProgress(string, bool) {}
-func (noopMetrics) WriteCache() WriteCacheMetrics { return noopWriteCacheMetrics{} }
-func (noopMetrics) GC() GCMetrics { return noopGCMetrics{} }
-
-func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {}
-func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {}
-func (noopWriteCacheMetrics) SetEstimateSize(string, string, string, uint64) {}
-func (noopWriteCacheMetrics) SetMode(string, string) {}
-func (noopWriteCacheMetrics) IncOperationCounter(string, string, string, string, metrics.NullBool) {}
-func (noopWriteCacheMetrics) Close(string, string) {}
-
-func (noopGCMetrics) AddRunDuration(string, time.Duration, bool) {}
-func (noopGCMetrics) AddDeletedCount(string, uint64, uint64) {}
-func (noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {}
-func (noopGCMetrics) AddInhumedObjectCount(string, uint64, string) {}
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 10cf5ffd5..79ee3a997 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -9,8 +9,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
@@ -20,24 +21,16 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- Object *objectSDK.Object
- IsIndexedContainer bool
+ obj *objectSDK.Object
}
var errPutShard = errors.New("could not put object to any shard")
-type putToShardStatus byte
-
-const (
- putToShardUnknown putToShardStatus = iota
- putToShardSuccess
- putToShardExists
- putToShardRemoved
-)
-
-type putToShardRes struct {
- status putToShardStatus
- err error
+// WithObject is a Put option to set object to save.
+//
+// Option is required.
+func (p *PutPrm) WithObject(obj *objectSDK.Object) {
+ p.obj = obj
}
// Put saves the object to local storage.
@@ -51,10 +44,9 @@ type putToShardRes struct {
func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put",
trace.WithAttributes(
- attribute.String("address", object.AddressOf(prm.Object).EncodeToString()),
+ attribute.String("address", object.AddressOf(prm.obj).EncodeToString()),
))
defer span.End()
- defer elapsed("Put", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
err = e.put(ctx, prm)
@@ -65,122 +57,119 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
}
func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
- addr := object.AddressOf(prm.Object)
+ if e.metrics != nil {
+ defer elapsed("Put", e.metrics.AddMethodDuration)()
+ }
+
+ addr := object.AddressOf(prm.obj)
// In #1146 this check was parallelized, however, it became
// much slower on fast machines for 4 shards.
- var ecParent oid.Address
- if prm.Object.ECHeader() != nil {
- ecParent.SetObject(prm.Object.ECHeader().Parent())
- ecParent.SetContainer(addr.Container())
- }
- var shPrm shard.ExistsPrm
- shPrm.Address = addr
- shPrm.ECParentAddress = ecParent
- existed, locked, err := e.exists(ctx, shPrm)
+ _, err := e.exists(ctx, addr)
if err != nil {
return err
}
- if !existed && locked {
- lockers, err := e.GetLocks(ctx, ecParent)
- if err != nil {
- return err
- }
- for _, locker := range lockers {
- err = e.lock(ctx, addr.Container(), locker, []oid.ID{addr.Object()})
- if err != nil {
- return err
- }
- }
- }
+ finished := false
- var shRes putToShardRes
- if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
+ e.iterateOverSortedShards(addr, func(ind int, sh hashedShard) (stop bool) {
e.mtx.RLock()
- _, ok := e.shards[sh.ID().String()]
+ pool, ok := e.shardPools[sh.ID().String()]
e.mtx.RUnlock()
if !ok {
// Shard was concurrently removed, skip.
return false
}
- shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer)
- return shRes.status != putToShardUnknown
- }); err != nil {
- return err
- }
- switch shRes.status {
- case putToShardUnknown:
- return errPutShard
- case putToShardRemoved:
- return shRes.err
- case putToShardExists, putToShardSuccess:
- return nil
- default:
- return errPutShard
+
+ putDone, exists := e.putToShard(ctx, sh, ind, pool, addr, prm.obj)
+ finished = putDone || exists
+ return finished
+ })
+
+ if !finished {
+ err = errPutShard
}
+
+ return err
}
// putToShard puts object to sh.
-// Return putToShardStatus and error if it is necessary to propagate an error upper.
-func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard,
- addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool,
-) (res putToShardRes) {
- var existPrm shard.ExistsPrm
- existPrm.Address = addr
+// First return value is true iff put has been successfully done.
+// Second return value is true iff object already exists.
+func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int, pool util.WorkerPool, addr oid.Address, obj *objectSDK.Object) (bool, bool) {
+ var putSuccess, alreadyExists bool
- exists, err := sh.Exists(ctx, existPrm)
- if err != nil {
- if shard.IsErrObjectExpired(err) {
- // object is already found but
- // expired => do nothing with it
- res.status = putToShardExists
- } else {
- e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
- zap.Stringer("shard_id", sh.ID()),
- zap.Error(err))
+ exitCh := make(chan struct{})
+
+ if err := pool.Submit(func() {
+ defer close(exitCh)
+
+ var existPrm shard.ExistsPrm
+ existPrm.SetAddress(addr)
+
+ exists, err := sh.Exists(ctx, existPrm)
+ if err != nil {
+ if shard.IsErrObjectExpired(err) {
+ // object is already found but
+ // expired => do nothing with it
+ alreadyExists = true
+ }
+
+ return // this is not ErrAlreadyRemoved error so we can go to the next shard
}
- return // this is not ErrAlreadyRemoved error so we can go to the next shard
- }
+ alreadyExists = exists.Exists()
+ if alreadyExists {
+ if ind != 0 {
+ var toMoveItPrm shard.ToMoveItPrm
+ toMoveItPrm.SetAddress(addr)
- if exists.Exists() {
- res.status = putToShardExists
- return
- }
+ _, err = sh.ToMoveIt(ctx, toMoveItPrm)
+ if err != nil {
+ e.log.Warn(logs.EngineCouldNotMarkObjectForShardRelocation,
+ zap.Stringer("shard", sh.ID()),
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
+ )
+ }
+ }
- var putPrm shard.PutPrm
- putPrm.SetObject(obj)
- putPrm.SetIndexAttributes(isIndexedContainer)
-
- _, err = sh.Put(ctx, putPrm)
- if err != nil {
- if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
- errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
- e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
- zap.Stringer("shard_id", sh.ID()),
- zap.Error(err))
- return
- }
- if client.IsErrObjectAlreadyRemoved(err) {
- e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
- zap.Stringer("shard_id", sh.ID()),
- zap.Error(err))
- res.status = putToShardRemoved
- res.err = err
return
}
- e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr))
- return
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+
+ _, err = sh.Put(ctx, putPrm)
+ if err != nil {
+ if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
+ errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
+ e.log.Warn(logs.EngineCouldNotPutObjectToShard,
+ zap.Stringer("shard_id", sh.ID()),
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return
+ }
+
+ e.reportShardError(sh, "could not put object to shard", err)
+ return
+ }
+
+ putSuccess = true
+ }); err != nil {
+ e.log.Warn(logs.EngineCouldNotPutObjectToShard, zap.Error(err))
+ close(exitCh)
}
- res.status = putToShardSuccess
+ <-exitCh
- return
+ return putSuccess, alreadyExists
}
// Put writes provided object to local storage.
-func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object, indexedContainer bool) error {
- return storage.Put(ctx, PutPrm{Object: obj, IsIndexedContainer: indexedContainer})
+func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
+ var putPrm PutPrm
+ putPrm.WithObject(obj)
+
+ return storage.Put(ctx, putPrm)
}
diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go
index 7ec4742d8..e45845d6c 100644
--- a/pkg/local_object_storage/engine/range.go
+++ b/pkg/local_object_storage/engine/range.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -64,15 +65,6 @@ func (r RngRes) Object() *objectSDK.Object {
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, err error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange",
- trace.WithAttributes(
- attribute.String("address", prm.addr.EncodeToString()),
- attribute.String("offset", strconv.FormatUint(prm.off, 10)),
- attribute.String("length", strconv.FormatUint(prm.ln, 10)),
- ))
- defer span.End()
- defer elapsed("GetRange", e.metrics.AddMethodDuration)()
-
err = e.execIfNotBlocked(func() error {
res, err = e.getRange(ctx, prm)
return err
@@ -82,6 +74,18 @@ func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, e
}
func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.String("offset", strconv.FormatUint(prm.off, 10)),
+ attribute.String("length", strconv.FormatUint(prm.ln, 10)),
+ ))
+ defer span.End()
+
+ if e.metrics != nil {
+ defer elapsed("GetRange", e.metrics.AddMethodDuration)()
+ }
+
var shPrm shard.RngPrm
shPrm.SetAddress(prm.addr)
shPrm.SetRange(prm.off, prm.ln)
@@ -93,16 +97,11 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
Engine: e,
}
- if err := it.tryGetWithMeta(ctx); err != nil {
- return RngRes{}, err
- }
+ it.tryGetWithMeta(ctx)
if it.SplitInfo != nil {
return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
}
- if it.ECInfo != nil {
- return RngRes{}, logicerr.Wrap(objectSDK.NewECInfoError(it.ECInfo))
- }
if it.Object == nil {
// If any shard is in a degraded mode, we should assume that metabase could store
@@ -111,18 +110,17 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
return RngRes{}, it.OutError
}
- if err := it.tryGetFromBlobstor(ctx); err != nil {
- return RngRes{}, err
- }
+ it.tryGetFromBlobstor(ctx)
if it.Object == nil {
return RngRes{}, it.OutError
}
if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
- e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
+ e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
zap.Stringer("shard_id", it.ShardWithMeta.ID()),
- zap.Error(it.MetaError),
- zap.Stringer("address", prm.addr))
+ zap.String("error", it.MetaError.Error()),
+ zap.Stringer("address", prm.addr),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
@@ -149,8 +147,6 @@ type getRangeShardIterator struct {
Object *objectSDK.Object
SplitInfoError *objectSDK.SplitInfoError
SplitInfo *objectSDK.SplitInfo
- ECInfoError *objectSDK.ECInfoError
- ECInfo *objectSDK.ECInfo
OutError error
ShardWithMeta hashedShard
MetaError error
@@ -161,8 +157,8 @@ type getRangeShardIterator struct {
Engine *StorageEngine
}
-func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error {
- return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) {
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
noMeta := sh.GetMode().NoMetabase()
i.HasDegraded = i.HasDegraded || noMeta
i.ShardPrm.SetIgnoreMeta(noMeta)
@@ -192,14 +188,6 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error {
// stop iterating over shards if SplitInfo structure is complete
return withLink && withLast
- case errors.As(err, &i.ECInfoError):
- if i.ECInfo == nil {
- i.ECInfo = objectSDK.NewECInfo()
- }
-
- util.MergeECInfo(i.ECInfoError.ECInfo(), i.ECInfo)
- // stop iterating over shards if ECInfo structure is complete
- return len(i.ECInfo.Chunks) == int(i.ECInfo.Chunks[0].Total)
case
client.IsErrObjectAlreadyRemoved(err),
shard.IsErrOutOfRange(err):
@@ -207,19 +195,19 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error {
return true // stop, return it back
default:
- i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
+ i.Engine.reportShardError(sh, "could not get object from shard", err)
return false
}
})
}
-func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error {
+func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) {
// If the object is not found but is present in metabase,
// try to fetch it from blobstor directly. If it is found in any
// blobstor, increase the error counter for the shard which contains the meta.
i.ShardPrm.SetIgnoreMeta(true)
- return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) {
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
if sh.GetMode().NoMetabase() {
// Already processed it without a metabase.
return false
diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go
deleted file mode 100644
index a29dd7ed9..000000000
--- a/pkg/local_object_storage/engine/rebuild.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package engine
-
-import (
- "context"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
- "golang.org/x/sync/errgroup"
-)
-
-type RebuildPrm struct {
- ShardIDs []*shard.ID
- ConcurrencyLimit uint32
- TargetFillPercent uint32
-}
-
-type ShardRebuildResult struct {
- ShardID *shard.ID
- Success bool
- ErrorMsg string
-}
-
-type RebuildRes struct {
- ShardResults []ShardRebuildResult
-}
-
-func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Rebuild",
- trace.WithAttributes(
- attribute.Int("shard_id_count", len(prm.ShardIDs)),
- attribute.Int64("target_fill_percent", int64(prm.TargetFillPercent)),
- attribute.Int64("concurrency_limit", int64(prm.ConcurrencyLimit)),
- ))
- defer span.End()
-
- res := RebuildRes{
- ShardResults: make([]ShardRebuildResult, 0, len(prm.ShardIDs)),
- }
- resGuard := &sync.Mutex{}
-
- concLimiter := &concurrencyLimiter{semaphore: make(chan struct{}, prm.ConcurrencyLimit)}
-
- eg, egCtx := errgroup.WithContext(ctx)
- for _, shardID := range prm.ShardIDs {
- eg.Go(func() error {
- e.mtx.RLock()
- sh, ok := e.shards[shardID.String()]
- e.mtx.RUnlock()
-
- if !ok {
- resGuard.Lock()
- defer resGuard.Unlock()
- res.ShardResults = append(res.ShardResults, ShardRebuildResult{
- ShardID: shardID,
- ErrorMsg: errShardNotFound.Error(),
- })
- return nil
- }
-
- err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{
- ConcurrencyLimiter: concLimiter,
- TargetFillPercent: prm.TargetFillPercent,
- })
-
- resGuard.Lock()
- defer resGuard.Unlock()
-
- if err != nil {
- res.ShardResults = append(res.ShardResults, ShardRebuildResult{
- ShardID: shardID,
- ErrorMsg: err.Error(),
- })
- } else {
- res.ShardResults = append(res.ShardResults, ShardRebuildResult{
- ShardID: shardID,
- Success: true,
- })
- }
- return nil
- })
- }
-
- if err := eg.Wait(); err != nil {
- return RebuildRes{}, err
- }
- return res, nil
-}
-
-type concurrencyLimiter struct {
- semaphore chan struct{}
-}
-
-func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) {
- select {
- case l.semaphore <- struct{}{}:
- return l.releaseWorkSlot, nil
- case <-ctx.Done():
- return nil, ctx.Err()
- }
-}
-
-func (l *concurrencyLimiter) releaseWorkSlot() {
- <-l.semaphore
-}
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
index 8ab3c5217..573153268 100644
--- a/pkg/local_object_storage/engine/remove_copies.go
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -43,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
prm.Concurrency = defaultRemoveDuplicatesConcurrency
}
- e.log.Info(ctx, logs.EngineStartingRemovalOfLocallyredundantCopies,
+ e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies,
zap.Int("concurrency", prm.Concurrency))
// The mutext must be taken for the whole duration to avoid target shard being removed
@@ -55,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
// This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0.
// However we could change weights in future and easily forget this function.
for _, sh := range e.shards {
- e.log.Debug(ctx, logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
+ e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
ch := make(chan oid.Address)
errG, ctx := errgroup.WithContext(ctx)
@@ -87,18 +87,18 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
}
})
- for range prm.Concurrency {
+ for i := 0; i < prm.Concurrency; i++ {
errG.Go(func() error {
return e.removeObjects(ctx, ch)
})
}
if err := errG.Wait(); err != nil {
- e.log.Error(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
+ e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
return err
}
}
- e.log.Info(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies)
+ e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies)
return nil
}
@@ -111,11 +111,11 @@ func (e *StorageEngine) removeObjects(ctx context.Context, ch <-chan oid.Address
for addr := range ch {
h := hrw.StringHash(addr.EncodeToString())
- hrw.SortHasherSliceByValue(shards, h)
+ shards := sortShardsByWeight(shards, h)
found := false
for i := range shards {
var existsPrm shard.ExistsPrm
- existsPrm.Address = addr
+ existsPrm.SetAddress(addr)
res, err := shards[i].Exists(ctx, existsPrm)
if err != nil {
diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go
index 6d2291c74..8131fcf0d 100644
--- a/pkg/local_object_storage/engine/remove_copies_test.go
+++ b/pkg/local_object_storage/engine/remove_copies_test.go
@@ -20,9 +20,6 @@ func TestRebalance(t *testing.T) {
t.Parallel()
te := newEngineWithErrorThreshold(t, "", 0)
- defer func() {
- require.NoError(t, te.ng.Close(context.Background()))
- }()
const (
objCount = 20
@@ -41,7 +38,7 @@ func TestRebalance(t *testing.T) {
obj.SetPayload(make([]byte, errSmallSize))
objects[i].object = obj
- shards := te.ng.sortShards(object.AddressOf(obj))
+ shards := te.ng.sortShardsByWeight(object.AddressOf(obj))
objects[i].bestShard = *shards[0].Shard.ID()
objects[i].worstShard = *shards[1].Shard.ID()
}
@@ -96,7 +93,7 @@ loop:
require.FailNow(t, "unexpected object was removed", removed[i].addr)
}
- for i := range copyCount {
+ for i := 0; i < copyCount; i++ {
if i%3 == 0 {
require.True(t, removedMask[i], "object %d was expected to be removed", i)
} else {
@@ -109,9 +106,6 @@ func TestRebalanceSingleThread(t *testing.T) {
t.Parallel()
te := newEngineWithErrorThreshold(t, "", 0)
- defer func() {
- require.NoError(t, te.ng.Close(context.Background()))
- }()
obj := testutil.GenerateObjectWithCID(cidtest.ID())
obj.SetPayload(make([]byte, errSmallSize))
@@ -159,9 +153,6 @@ type deleteEvent struct {
func TestRebalanceExitByContext(t *testing.T) {
te := newEngineWithErrorThreshold(t, "", 0)
- defer func() {
- require.NoError(t, te.ng.Close(context.Background()))
- }()
objects := make([]*objectSDK.Object, 4)
for i := range objects {
@@ -207,7 +198,7 @@ func TestRebalanceExitByContext(t *testing.T) {
}()
const removeCount = 3
- for range removeCount - 1 {
+ for i := 0; i < removeCount-1; i++ {
<-deleteCh
signal <- struct{}{}
}
diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go
index 4243a5481..6a8c9fab9 100644
--- a/pkg/local_object_storage/engine/select.go
+++ b/pkg/local_object_storage/engine/select.go
@@ -14,9 +14,8 @@ import (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
- indexedContainer bool
+ cnr cid.ID
+ filters objectSDK.SearchFilters
}
// SelectRes groups the resulting values of Select operation.
@@ -25,9 +24,8 @@ type SelectRes struct {
}
// WithContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) WithContainerID(cnr cid.ID, indexedContainer bool) {
+func (p *SelectPrm) WithContainerID(cnr cid.ID) {
p.cnr = cnr
- p.indexedContainer = indexedContainer
}
// WithFilters is a Select option to set the object filters.
@@ -51,29 +49,33 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe
attribute.String("container_id", prm.cnr.EncodeToString()),
))
defer span.End()
- defer elapsed("Select", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
- var sErr error
- res, sErr = e._select(ctx, prm)
- return sErr
+ res, err = e._select(ctx, prm)
+ return err
})
return
}
func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
+ if e.metrics != nil {
+ defer elapsed("Search", e.metrics.AddMethodDuration)()
+ }
+
addrList := make([]oid.Address, 0)
uniqueMap := make(map[string]struct{})
+ var outError error
+
var shPrm shard.SelectPrm
- shPrm.SetContainerID(prm.cnr, prm.indexedContainer)
+ shPrm.SetContainerID(prm.cnr)
shPrm.SetFilters(prm.filters)
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
res, err := sh.Select(ctx, shPrm)
if err != nil {
- e.reportShardError(ctx, sh, "could not select objects from shard", err)
+ e.reportShardError(sh, "could not select objects from shard", err)
return false
}
@@ -85,13 +87,11 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes,
}
return false
- }); err != nil {
- return SelectRes{}, err
- }
+ })
return SelectRes{
addrList: addrList,
- }, nil
+ }, outError
}
// List returns `limit` available physically storage object addresses in engine.
@@ -99,26 +99,28 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes,
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) {
- defer elapsed("List", e.metrics.AddMethodDuration)()
err = e.execIfNotBlocked(func() error {
- var lErr error
- res, lErr = e.list(ctx, limit)
- return lErr
+ res, err = e.list(ctx, limit)
+ return err
})
return
}
func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) {
+ if e.metrics != nil {
+ defer elapsed("ListObjects", e.metrics.AddMethodDuration)()
+ }
+
addrList := make([]oid.Address, 0, limit)
uniqueMap := make(map[string]struct{})
ln := uint64(0)
// consider iterating over shuffled shards
- if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+ e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
res, err := sh.List(ctx) // consider limit result of shard iterator
if err != nil {
- e.reportShardError(ctx, sh, "could not select objects from shard", err)
+ e.reportShardError(sh, "could not select objects from shard", err)
} else {
for _, addr := range res.AddressList() { // save only unique values
if _, ok := uniqueMap[addr.EncodeToString()]; !ok {
@@ -134,9 +136,7 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro
}
return false
- }); err != nil {
- return SelectRes{}, err
- }
+ })
return SelectRes{
addrList: addrList,
@@ -144,9 +144,9 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro
}
// Select selects objects from local storage using provided filters.
-func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, isIndexedContainer bool, fs objectSDK.SearchFilters) ([]oid.Address, error) {
+func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
var selectPrm SelectPrm
- selectPrm.WithContainerID(cnr, isIndexedContainer)
+ selectPrm.WithContainerID(cnr)
selectPrm.WithFilters(fs)
res, err := storage.Select(ctx, selectPrm)
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 69067c500..4b9d8752a 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -2,23 +2,18 @@ package engine
import (
"context"
- "errors"
"fmt"
- "sync"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
"github.com/google/uuid"
+ "github.com/panjf2000/ants/v2"
"go.uber.org/zap"
- "golang.org/x/sync/errgroup"
)
var errShardNotFound = logicerr.New("shard not found")
@@ -51,6 +46,10 @@ func (m *metricsWithID) IncObjectCounter(objectType string) {
m.mw.AddToObjectCounter(m.id, objectType, +1)
}
+func (m *metricsWithID) DecObjectCounter(objectType string) {
+ m.mw.AddToObjectCounter(m.id, objectType, -1)
+}
+
func (m *metricsWithID) SetMode(mode mode.Mode) {
m.mw.SetMode(m.id, mode)
}
@@ -75,34 +74,6 @@ func (m *metricsWithID) DeleteShardMetrics() {
m.mw.DeleteShardMetrics(m.id)
}
-func (m *metricsWithID) SetContainerObjectsCount(cnrID string, objectType string, value uint64) {
- m.mw.SetContainerObjectCounter(m.id, cnrID, objectType, value)
-}
-
-func (m *metricsWithID) IncContainerObjectsCount(cnrID string, objectType string) {
- m.mw.IncContainerObjectCounter(m.id, cnrID, objectType)
-}
-
-func (m *metricsWithID) SubContainerObjectsCount(cnrID string, objectType string, value uint64) {
- m.mw.SubContainerObjectCounter(m.id, cnrID, objectType, value)
-}
-
-func (m *metricsWithID) IncRefillObjectsCount(path string, size int, success bool) {
- m.mw.IncRefillObjectsCount(m.id, path, size, success)
-}
-
-func (m *metricsWithID) SetRefillPercent(path string, percent uint32) {
- m.mw.SetRefillPercent(m.id, path, percent)
-}
-
-func (m *metricsWithID) SetRefillStatus(path string, status string) {
- m.mw.SetRefillStatus(m.id, path, status)
-}
-
-func (m *metricsWithID) SetEvacuationInProgress(value bool) {
- m.mw.SetEvacuationInProgress(m.id, value)
-}
-
// AddShard adds a new shard to the storage engine.
//
// Returns any error encountered that did not allow adding a shard.
@@ -110,15 +81,17 @@ func (m *metricsWithID) SetEvacuationInProgress(value bool) {
func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) {
sh, err := e.createShard(ctx, opts)
if err != nil {
- return nil, fmt.Errorf("create a shard: %w", err)
+ return nil, fmt.Errorf("could not create a shard: %w", err)
}
err = e.addShard(sh)
if err != nil {
- return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err)
+ return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err)
}
- e.metrics.SetMode(sh.ID().String(), sh.GetMode())
+ if e.cfg.metrics != nil {
+ e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
+ }
return sh.ID(), nil
}
@@ -126,7 +99,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh
func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) {
id, err := generateShardID()
if err != nil {
- return nil, fmt.Errorf("generate shard ID: %w", err)
+ return nil, fmt.Errorf("could not generate shard ID: %w", err)
}
opts = e.appendMetrics(id, opts)
@@ -136,42 +109,42 @@ func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*
shard.WithExpiredTombstonesCallback(e.processExpiredTombstones),
shard.WithExpiredLocksCallback(e.processExpiredLocks),
shard.WithDeletedLockCallback(e.processDeletedLocks),
- shard.WithReportErrorFunc(e.reportShardErrorByID),
- shard.WithZeroSizeCallback(e.processZeroSizeContainers),
- shard.WithZeroCountCallback(e.processZeroCountContainers),
+ shard.WithReportErrorFunc(e.reportShardErrorBackground),
)...)
if err := sh.UpdateID(ctx); err != nil {
- e.log.Warn(ctx, logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err))
+ return nil, fmt.Errorf("could not update shard ID: %w", err)
}
- return sh, nil
+ return sh, err
}
func (e *StorageEngine) appendMetrics(id *shard.ID, opts []shard.Option) []shard.Option {
e.mtx.RLock()
defer e.mtx.RUnlock()
- opts = append(opts,
- shard.WithMetricsWriter(
- &metricsWithID{
- id: id.String(),
- mw: e.metrics,
- },
- ),
- shard.WithWriteCacheMetrics(
- &writeCacheMetrics{
- shardID: id.String(),
- metrics: e.metrics.WriteCache(),
- },
- ),
- shard.WithGCMetrics(
- &gcMetrics{
- storage: e.metrics.GC(),
- shardID: id.String(),
- },
- ),
- )
+ if e.metrics != nil {
+ opts = append(opts,
+ shard.WithMetricsWriter(
+ &metricsWithID{
+ id: id.String(),
+ mw: e.metrics,
+ },
+ ),
+ shard.WithWriteCacheMetrics(
+ &writeCacheMetrics{
+ shardID: id.String(),
+ metrics: e.metrics.WriteCache(),
+ },
+ ),
+ shard.WithGCMetrics(
+ &gcMetrics{
+ storage: e.metrics.GC(),
+ shardID: id.String(),
+ },
+ ),
+ )
+ }
return opts
}
@@ -180,6 +153,11 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
e.mtx.Lock()
defer e.mtx.Unlock()
+ pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true))
+ if err != nil {
+ return fmt.Errorf("could not create pool: %w", err)
+ }
+
strID := sh.ID().String()
if _, ok := e.shards[strID]; ok {
return fmt.Errorf("shard with id %s was already added", strID)
@@ -193,12 +171,14 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
hash: hrw.StringHash(strID),
}
+ e.shardPools[strID] = pool
+
return nil
}
// removeShards removes specified shards. Skips non-existent shards.
// Logs errors about shards that it could not Close after the removal.
-func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) {
+func (e *StorageEngine) removeShards(ids ...string) {
if len(ids) == 0 {
return
}
@@ -212,27 +192,33 @@ func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) {
continue
}
- e.metrics.DeleteShardMetrics(id)
+ sh.DeleteShardMetrics()
ss = append(ss, sh)
delete(e.shards, id)
- e.log.Info(ctx, logs.EngineShardHasBeenRemoved,
+ pool, ok := e.shardPools[id]
+ if ok {
+ pool.Release()
+ delete(e.shardPools, id)
+ }
+
+ e.log.Info(logs.EngineShardHasBeenRemoved,
zap.String("id", id))
}
e.mtx.Unlock()
for _, sh := range ss {
- err := sh.SetMode(ctx, mode.Disabled)
+ err := sh.SetMode(mode.Disabled)
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled,
+ e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
}
- err = sh.Close(ctx)
+ err = sh.Close()
if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard,
+ e.log.Error(logs.EngineCouldNotCloseRemovedShard,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
@@ -254,16 +240,31 @@ func generateShardID() (*shard.ID, error) {
return shard.NewIDFromBytes(bin), nil
}
-func (e *StorageEngine) sortShards(objAddr interface{ EncodeToString() string }) []hashedShard {
+func (e *StorageEngine) shardWeight(sh *shard.Shard) float64 {
+ weightValues := sh.WeightValues()
+
+ return float64(weightValues.FreeSpace)
+}
+
+func (e *StorageEngine) sortShardsByWeight(objAddr interface{ EncodeToString() string }) []hashedShard {
e.mtx.RLock()
defer e.mtx.RUnlock()
h := hrw.StringHash(objAddr.EncodeToString())
shards := make([]hashedShard, 0, len(e.shards))
for _, sh := range e.shards {
- shards = append(shards, sh)
+ shards = append(shards, hashedShard(sh))
}
- hrw.SortHasherSliceByValue(shards, h)
+ return sortShardsByWeight(shards, h)
+}
+
+func sortShardsByWeight(shards []hashedShard, h uint64) []hashedShard {
+ weights := make([]float64, 0, len(shards))
+ for _, sh := range shards {
+ weights = append(weights, float64(sh.Shard.WeightValues().FreeSpace))
+ }
+
+ hrw.SortHasherSliceByWeightValue(shards, weights, h)
return shards
}
@@ -274,44 +275,32 @@ func (e *StorageEngine) unsortedShards() []hashedShard {
shards := make([]hashedShard, 0, len(e.shards))
for _, sh := range e.shards {
- shards = append(shards, sh)
+ shards = append(shards, hashedShard(sh))
}
return shards
}
-func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error {
- for i, sh := range e.sortShards(addr) {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
+func (e *StorageEngine) iterateOverSortedShards(addr oid.Address, handler func(int, hashedShard) (stop bool)) {
+ for i, sh := range e.sortShardsByWeight(addr) {
if handler(i, sh) {
break
}
}
- return nil
}
-func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error {
+func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (stop bool)) {
for _, sh := range e.unsortedShards() {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
if handler(sh) {
break
}
}
- return nil
}
// SetShardMode sets mode of the shard with provided identifier.
//
// Returns an error if shard mode was not set, or shard was not found in storage engine.
-func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.Mode, resetErrorCounter bool) error {
+func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounter bool) error {
e.mtx.RLock()
defer e.mtx.RUnlock()
@@ -319,9 +308,9 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M
if id.String() == shID {
if resetErrorCounter {
sh.errorCount.Store(0)
- e.metrics.ClearErrorCounter(shID)
+ sh.Shard.ClearErrorCounter()
}
- return sh.SetMode(ctx, m)
+ return sh.SetMode(m)
}
}
@@ -329,154 +318,17 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M
}
// HandleNewEpoch notifies every shard about NewEpoch event.
-func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
+func (e *StorageEngine) HandleNewEpoch(epoch uint64) {
+ ev := shard.EventNewEpoch(epoch)
+
e.mtx.RLock()
defer e.mtx.RUnlock()
for _, sh := range e.shards {
- select {
- case <-ctx.Done():
- return
- case sh.NotificationChannel() <- epoch:
- default:
- e.log.Debug(ctx, logs.ShardEventProcessingInProgress,
- zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID()))
- }
+ sh.NotificationChannel() <- ev
}
}
-func (e *StorageEngine) DetachShards(ctx context.Context, ids []*shard.ID) error {
- if len(ids) == 0 {
- return logicerr.New("ids must be non-empty")
- }
-
- deletedShards, err := e.deleteShards(ctx, ids)
- if err != nil {
- return err
- }
-
- return e.closeShards(ctx, deletedShards)
-}
-
-// closeShards closes deleted shards. Tries to close all shards.
-// Returns single error with joined shard errors.
-func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedShard) error {
- var multiErr error
- var multiErrGuard sync.Mutex
- var eg errgroup.Group
- for _, sh := range deletedShards {
- eg.Go(func() error {
- err := sh.SetMode(ctx, mode.Disabled)
- if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled,
- zap.Stringer("id", sh.ID()),
- zap.Error(err),
- )
- multiErrGuard.Lock()
- multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err))
- multiErrGuard.Unlock()
- }
-
- err = sh.Close(ctx)
- if err != nil {
- e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard,
- zap.Stringer("id", sh.ID()),
- zap.Error(err),
- )
- multiErrGuard.Lock()
- multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err))
- multiErrGuard.Unlock()
- }
- return nil
- })
- }
- if err := eg.Wait(); err != nil {
- return err
- }
- return multiErr
-}
-
-// deleteShards deletes shards with specified ids from engine shard list
-// and releases all engine resources associated with shards.
-// Returns deleted shards or error if some shard could not be deleted.
-func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]hashedShard, error) {
- ss := make([]hashedShard, 0, len(ids))
-
- e.mtx.Lock()
- defer e.mtx.Unlock()
-
- for _, id := range ids {
- idStr := id.String()
- sh, found := e.shards[idStr]
- if !found {
- return nil, errShardNotFound
- }
- ss = append(ss, sh)
- }
-
- if len(ss) == len(e.shards) {
- return nil, logicerr.New("could not delete all the shards")
- }
-
- for _, sh := range ss {
- idStr := sh.ID().String()
-
- e.metrics.DeleteShardMetrics(idStr)
-
- delete(e.shards, idStr)
-
- e.log.Info(ctx, logs.EngineShardHasBeenRemoved,
- zap.String("id", idStr))
- }
-
- return ss, nil
-}
-
func (s hashedShard) Hash() uint64 {
return s.hash
}
-
-func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address) ([]shard.Info, error) {
- var err error
- var info []shard.Info
- prm := shard.ExistsPrm{
- Address: obj,
- }
- var siErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
-
- if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
- res, exErr := hs.Exists(ctx, prm)
- if exErr != nil {
- if client.IsErrObjectAlreadyRemoved(exErr) {
- err = new(apistatus.ObjectAlreadyRemoved)
- return true
- }
-
- // Check if error is either SplitInfoError or ECInfoError.
- // True means the object is virtual.
- if errors.As(exErr, &siErr) || errors.As(exErr, &ecErr) {
- info = append(info, hs.DumpInfo())
- return false
- }
-
- if shard.IsErrObjectExpired(exErr) {
- err = exErr
- return true
- }
-
- if !client.IsErrObjectNotFound(exErr) {
- e.reportShardError(ctx, hs, "could not check existence of object in shard", exErr, zap.Stringer("address", prm.Address))
- }
-
- return false
- }
- if res.Exists() {
- info = append(info, hs.DumpInfo())
- }
- return false
- }); itErr != nil {
- return nil, itErr
- }
- return info, err
-}
diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go
index 3aa9629b0..e13017e24 100644
--- a/pkg/local_object_storage/engine/shards_test.go
+++ b/pkg/local_object_storage/engine/shards_test.go
@@ -4,19 +4,19 @@ import (
"context"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/hrw"
"github.com/stretchr/testify/require"
)
func TestRemoveShard(t *testing.T) {
const numOfShards = 6
- te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
+ te := testNewEngine(t).setShardsNum(t, numOfShards)
e, ids := te.engine, te.shardIDs
- defer func() { require.NoError(t, e.Close(context.Background())) }()
+ t.Cleanup(func() {
+ e.Close(context.Background())
+ })
+ require.Equal(t, numOfShards, len(e.shardPools))
require.Equal(t, numOfShards, len(e.shards))
removedNum := numOfShards / 2
@@ -32,10 +32,11 @@ func TestRemoveShard(t *testing.T) {
for id, remove := range mSh {
if remove {
- e.removeShards(context.Background(), id)
+ e.removeShards(id)
}
}
+ require.Equal(t, numOfShards-removedNum, len(e.shardPools))
require.Equal(t, numOfShards-removedNum, len(e.shards))
for id, removed := range mSh {
@@ -43,45 +44,3 @@ func TestRemoveShard(t *testing.T) {
require.True(t, ok != removed)
}
}
-
-func TestDisableShards(t *testing.T) {
- t.Parallel()
-
- const numOfShards = 2
-
- te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
- e, ids := te.engine, te.shardIDs
- defer func() { require.NoError(t, e.Close(context.Background())) }()
-
- require.ErrorAs(t, e.DetachShards(context.Background(), ids), new(logicerr.Logical))
- require.ErrorAs(t, e.DetachShards(context.Background(), nil), new(logicerr.Logical))
- require.ErrorAs(t, e.DetachShards(context.Background(), []*shard.ID{}), new(logicerr.Logical))
-
- require.NoError(t, e.DetachShards(context.Background(), []*shard.ID{ids[0]}))
-
- require.Equal(t, 1, len(e.shards))
-}
-
-func TestSortShardsByWeight(t *testing.T) {
- t.Parallel()
-
- const numOfShards = 500
-
- var shards1 []hashedShard
- var weights1 []float64
- var shards2 []hashedShard
- for i := range numOfShards {
- shards1 = append(shards1, hashedShard{
- hash: uint64(i),
- })
- weights1 = append(weights1, 0)
- shards2 = append(shards2, hashedShard{
- hash: uint64(i),
- })
- }
-
- hrw.SortHasherSliceByWeightValue(shards1, weights1, 0)
- hrw.SortHasherSliceByValue(shards2, 0)
-
- require.Equal(t, shards1, shards2)
-}
diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go
index cfd15b4d4..6c3594bab 100644
--- a/pkg/local_object_storage/engine/tree.go
+++ b/pkg/local_object_storage/engine/tree.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.opentelemetry.io/otel/attribute"
@@ -36,9 +37,10 @@ func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor,
lm, err := lst[index].TreeMove(ctx, d, treeID, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err,
+ e.reportShardError(lst[index], "can't perform `TreeMove`", err,
zap.Stringer("cid", d.CID),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return nil, err
@@ -69,9 +71,10 @@ func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescrip
lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err,
+ e.reportShardError(lst[index], "can't perform `TreeAddByPath`", err,
zap.Stringer("cid", d.CID),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return nil, err
}
@@ -97,36 +100,10 @@ func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID str
err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err,
+ e.reportShardError(lst[index], "can't perform `TreeApply`", err,
zap.Stringer("cid", cnr),
- zap.String("tree", treeID))
- }
- return err
- }
- return nil
-}
-
-// TreeApplyBatch implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApplyBatch",
- trace.WithAttributes(
- attribute.String("container_id", cnr.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- index, lst, err := e.getTreeShard(ctx, cnr, treeID)
- if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
- return err
- }
-
- err = lst[index].TreeApplyBatch(ctx, cnr, treeID, m)
- if err != nil {
- if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(ctx, lst[index], "can't perform `TreeApplyBatch`", err,
- zap.Stringer("cid", cnr),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return err
}
@@ -148,16 +125,17 @@ func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID
var err error
var nodes []pilorama.Node
- for _, sh := range e.sortShards(cid) {
+ for _, sh := range e.sortShardsByWeight(cid) {
nodes, err = sh.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err,
+ e.reportShardError(sh, "can't perform `TreeGetByPath`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -180,16 +158,17 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s
var err error
var m pilorama.Meta
var p uint64
- for _, sh := range e.sortShards(cid) {
+ for _, sh := range e.sortShardsByWeight(cid) {
m, p, err = sh.TreeGetMeta(ctx, cid, treeID, nodeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err,
+ e.reportShardError(sh, "can't perform `TreeGetMeta`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -211,16 +190,17 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
var err error
var nodes []pilorama.NodeInfo
- for _, sh := range e.sortShards(cid) {
+ for _, sh := range e.sortShardsByWeight(cid) {
nodes, err = sh.TreeGetChildren(ctx, cid, treeID, nodeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err,
+ e.reportShardError(sh, "can't perform `TreeGetChildren`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -229,37 +209,6 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
return nil, err
}
-// TreeSortedByFilename implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename",
- trace.WithAttributes(
- attribute.String("container_id", cid.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- var err error
- var nodes []pilorama.MultiNodeInfo
- var cursor *pilorama.Cursor
- for _, sh := range e.sortShards(cid) {
- nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
- if err != nil {
- if err == shard.ErrPiloramaDisabled {
- break
- }
- if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err,
- zap.Stringer("cid", cid),
- zap.String("tree", treeID))
- }
- continue
- }
- return nodes, cursor, nil
- }
- return nil, last, err
-}
-
// TreeGetOpLog implements the pilorama.Forest interface.
func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetOpLog",
@@ -273,16 +222,17 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID
var err error
var lm pilorama.Move
- for _, sh := range e.sortShards(cid) {
+ for _, sh := range e.sortShardsByWeight(cid) {
lm, err = sh.TreeGetOpLog(ctx, cid, treeID, height)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err,
+ e.reportShardError(sh, "can't perform `TreeGetOpLog`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -302,16 +252,17 @@ func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID stri
defer span.End()
var err error
- for _, sh := range e.sortShards(cid) {
+ for _, sh := range e.sortShardsByWeight(cid) {
err = sh.TreeDrop(ctx, cid, treeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) {
- e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err,
+ e.reportShardError(sh, "can't perform `TreeDrop`", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -338,8 +289,9 @@ func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string,
return nil, err
}
- e.reportShardError(ctx, sh, "can't perform `TreeList`", err,
- zap.Stringer("cid", cid))
+ e.reportShardError(sh, "can't perform `TreeList`", err,
+ zap.Stringer("cid", cid),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
// returns as much info about
// trees as possible
@@ -403,9 +355,10 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK
err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
- e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err,
+ e.reportShardError(lst[index], "can't update tree synchronization height", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
return err
}
@@ -422,16 +375,17 @@ func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, t
var err error
var height uint64
- for _, sh := range e.sortShards(cid) {
+ for _, sh := range e.sortShardsByWeight(cid) {
height, err = sh.TreeLastSyncHeight(ctx, cid, treeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
}
if !errors.Is(err, pilorama.ErrTreeNotFound) {
- e.reportShardError(ctx, sh, "can't read tree synchronization height", err,
+ e.reportShardError(sh, "can't read tree synchronization height", err,
zap.Stringer("cid", cid),
- zap.String("tree", treeID))
+ zap.String("tree", treeID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
continue
}
@@ -441,7 +395,7 @@ func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, t
}
func (e *StorageEngine) getTreeShard(ctx context.Context, cid cidSDK.ID, treeID string) (int, []hashedShard, error) {
- lst := e.sortShards(cid)
+ lst := e.sortShardsByWeight(cid)
for i, sh := range lst {
exists, err := sh.TreeExists(ctx, cid, treeID)
if err != nil {
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index ea0a9e74e..f1650b5ae 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "github.com/stretchr/testify/require"
)
func BenchmarkTreeVsSearch(b *testing.B) {
@@ -26,18 +25,14 @@ func BenchmarkTreeVsSearch(b *testing.B) {
func benchmarkTreeVsSearch(b *testing.B, objCount int) {
te := newEngineWithErrorThreshold(b, "", 0)
- defer func() {
- require.NoError(b, te.ng.Close(context.Background()))
- }()
-
cid := cidtest.ID()
d := pilorama.CIDDescriptor{CID: cid, Position: 0, Size: 1}
treeID := "someTree"
- for i := range objCount {
+ for i := 0; i < objCount; i++ {
obj := testutil.GenerateObjectWithCID(cid)
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
- err := Put(context.Background(), te.ng, obj, false)
+ err := Put(context.Background(), te.ng, obj)
if err != nil {
b.Fatal(err)
}
@@ -50,13 +45,13 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
b.Run("search", func(b *testing.B) {
var prm SelectPrm
- prm.WithContainerID(cid, true)
+ prm.WithContainerID(cid)
var fs objectSDK.SearchFilters
fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual)
prm.WithFilters(fs)
- for range b.N {
+ for i := 0; i < b.N; i++ {
res, err := te.ng.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
@@ -67,7 +62,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
}
})
b.Run("TreeGetByPath", func(b *testing.B) {
- for range b.N {
+ for i := 0; i < b.N; i++ {
nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
if err != nil {
b.Fatal(err)
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index e9ba3410f..2e518c6ff 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -2,24 +2,21 @@ package engine
import (
"context"
- "sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
- "golang.org/x/sync/errgroup"
)
// FlushWriteCachePrm groups the parameters of FlushWriteCache operation.
type FlushWriteCachePrm struct {
shardID *shard.ID
ignoreErrors bool
- seal bool
}
// SetShardID is an option to set shard ID.
@@ -29,16 +26,11 @@ func (p *FlushWriteCachePrm) SetShardID(id *shard.ID) {
p.shardID = id
}
-// SetIgnoreErrors sets errors ignore flag.
+// SetIgnoreErrors sets errors ignore flag..
func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) {
p.ignoreErrors = ignore
}
-// SetSeal sets seal flag.
-func (p *FlushWriteCachePrm) SetSeal(v bool) {
- p.seal = v
-}
-
// FlushWriteCacheRes groups the resulting values of FlushWriteCache operation.
type FlushWriteCacheRes struct{}
@@ -46,9 +38,8 @@ type FlushWriteCacheRes struct{}
func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) (FlushWriteCacheRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.FlushWriteCache",
trace.WithAttributes(
- attribute.String("shard_id", p.shardID.String()),
+ attribute.String("shard_id", p.shardID.String()),
attribute.Bool("ignore_errors", p.ignoreErrors),
- attribute.Bool("seal", p.seal),
))
defer span.End()
@@ -62,133 +53,56 @@ func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePr
var prm shard.FlushWriteCachePrm
prm.SetIgnoreErrors(p.ignoreErrors)
- prm.SetSeal(p.seal)
return FlushWriteCacheRes{}, sh.FlushWriteCache(ctx, prm)
}
-type SealWriteCachePrm struct {
- ShardIDs []*shard.ID
- IgnoreErrors bool
- Async bool
- RestoreMode bool
- Shrink bool
-}
-
-type ShardSealResult struct {
- ShardID *shard.ID
- Success bool
- ErrorMsg string
-}
-
-type SealWriteCacheRes struct {
- ShardResults []ShardSealResult
-}
-
-// SealWriteCache flushed all data to blobstore and moves write-cache to degraded read only mode.
-func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePrm) (SealWriteCacheRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.SealWriteCache",
- trace.WithAttributes(
- attribute.Int("shard_id_count", len(prm.ShardIDs)),
- attribute.Bool("ignore_errors", prm.IgnoreErrors),
- attribute.Bool("restore_mode", prm.RestoreMode),
- ))
- defer span.End()
-
- res := SealWriteCacheRes{
- ShardResults: make([]ShardSealResult, 0, len(prm.ShardIDs)),
- }
- resGuard := &sync.Mutex{}
-
- eg, egCtx := errgroup.WithContext(ctx)
- for _, shardID := range prm.ShardIDs {
- eg.Go(func() error {
- e.mtx.RLock()
- sh, ok := e.shards[shardID.String()]
- e.mtx.RUnlock()
-
- if !ok {
- resGuard.Lock()
- defer resGuard.Unlock()
- res.ShardResults = append(res.ShardResults, ShardSealResult{
- ShardID: shardID,
- ErrorMsg: errShardNotFound.Error(),
- })
- return nil
- }
-
- err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors, Async: prm.Async, RestoreMode: prm.RestoreMode, Shrink: prm.Shrink})
-
- resGuard.Lock()
- defer resGuard.Unlock()
-
- if err != nil {
- res.ShardResults = append(res.ShardResults, ShardSealResult{
- ShardID: shardID,
- ErrorMsg: err.Error(),
- })
- } else {
- res.ShardResults = append(res.ShardResults, ShardSealResult{
- ShardID: shardID,
- Success: true,
- })
- }
- return nil
- })
- }
-
- if err := eg.Wait(); err != nil {
- return SealWriteCacheRes{}, err
- }
- return res, nil
-}
-
type writeCacheMetrics struct {
shardID string
metrics metrics.WriteCacheMetrics
- path string
-}
-
-func (m *writeCacheMetrics) SetPath(path string) {
- m.path = path
-}
-
-func (m *writeCacheMetrics) SetShardID(id string) {
- m.shardID = id
}
func (m *writeCacheMetrics) Get(d time.Duration, success bool, st writecache.StorageType) {
- m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Get", success, d)
+ m.metrics.AddMethodDuration(m.shardID, "Get", success, d, st.String())
}
func (m *writeCacheMetrics) Delete(d time.Duration, success bool, st writecache.StorageType) {
- m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Delete", success, d)
+ m.metrics.AddMethodDuration(m.shardID, "Delete", success, d, st.String())
+ if success {
+ m.metrics.DecActualCount(m.shardID, st.String())
+ }
}
func (m *writeCacheMetrics) Put(d time.Duration, success bool, st writecache.StorageType) {
- m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Put", success, d)
+ m.metrics.AddMethodDuration(m.shardID, "Put", success, d, st.String())
+ if success {
+ m.metrics.IncActualCount(m.shardID, st.String())
+ }
}
-func (m *writeCacheMetrics) SetEstimateSize(size uint64) {
- m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeFSTree.String(), size)
+func (m *writeCacheMetrics) SetEstimateSize(db, fstree uint64) {
+ m.metrics.SetEstimateSize(m.shardID, db, writecache.StorageTypeDB.String())
+ m.metrics.SetEstimateSize(m.shardID, fstree, writecache.StorageTypeFSTree.String())
}
-func (m *writeCacheMetrics) SetMode(mod mode.ComponentMode) {
- m.metrics.SetMode(m.shardID, mod.String())
+func (m *writeCacheMetrics) SetMode(mode mode.Mode) {
+ m.metrics.SetMode(m.shardID, mode.String())
}
-func (m *writeCacheMetrics) SetActualCounters(count uint64) {
- m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeFSTree.String(), count)
+func (m *writeCacheMetrics) SetActualCounters(db, fstree uint64) {
+ m.metrics.SetActualCount(m.shardID, db, writecache.StorageTypeDB.String())
+ m.metrics.SetActualCount(m.shardID, fstree, writecache.StorageTypeFSTree.String())
}
func (m *writeCacheMetrics) Flush(success bool, st writecache.StorageType) {
- m.metrics.IncOperationCounter(m.shardID, m.path, st.String(), "Flush", metrics.NullBool{Bool: success, Valid: true})
+ m.metrics.IncOperationCounter(m.shardID, "Flush", metrics.NullBool{Bool: success, Valid: true}, st.String())
}
func (m *writeCacheMetrics) Evict(st writecache.StorageType) {
- m.metrics.IncOperationCounter(m.shardID, m.path, st.String(), "Evict", metrics.NullBool{})
+ m.metrics.DecActualCount(m.shardID, st.String())
+ m.metrics.IncOperationCounter(m.shardID, "Evict", metrics.NullBool{}, st.String())
}
func (m *writeCacheMetrics) Close() {
- m.metrics.Close(m.shardID, m.path)
+ m.metrics.Close(m.shardID)
}
diff --git a/pkg/local_object_storage/internal/log/log.go b/pkg/local_object_storage/internal/log/log.go
index 6b101fa60..82024ffa7 100644
--- a/pkg/local_object_storage/internal/log/log.go
+++ b/pkg/local_object_storage/internal/log/log.go
@@ -1,16 +1,16 @@
package storagelog
import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
+// headMsg is a distinctive part of all messages.
+const headMsg = "local object storage operation"
+
// Write writes message about storage engine's operation to logger.
-func Write(ctx context.Context, logger *logger.Logger, fields ...zap.Field) {
- logger.Debug(ctx, logs.StorageOperation, fields...)
+func Write(logger *logger.Logger, fields ...zap.Field) {
+ logger.Info(headMsg, fields...)
}
// AddressField returns logger's field for object address.
diff --git a/pkg/local_object_storage/internal/metaerr/error_test.go b/pkg/local_object_storage/internal/metaerr/error_test.go
index acde48793..5a16aa501 100644
--- a/pkg/local_object_storage/internal/metaerr/error_test.go
+++ b/pkg/local_object_storage/internal/metaerr/error_test.go
@@ -48,7 +48,6 @@ func TestError(t *testing.T) {
}
})
}
-
func TestNilWrap(t *testing.T) {
require.NoError(t, Wrap(nil))
}
diff --git a/pkg/local_object_storage/internal/storagetest/storage.go b/pkg/local_object_storage/internal/storagetest/storage.go
index d46365296..ec60a2d0e 100644
--- a/pkg/local_object_storage/internal/storagetest/storage.go
+++ b/pkg/local_object_storage/internal/storagetest/storage.go
@@ -10,10 +10,10 @@ import (
// Component represents single storage component.
type Component interface {
- Open(context.Context, mode.Mode) error
- SetMode(context.Context, mode.Mode) error
- Init(context.Context) error
- Close(context.Context) error
+ Open(context.Context, bool) error
+ SetMode(mode.Mode) error
+ Init() error
+ Close() error
}
// Constructor constructs storage component.
@@ -58,19 +58,19 @@ func TestCloseAfterOpen(t *testing.T, cons Constructor) {
t.Run("RW", func(t *testing.T) {
// Use-case: irrecoverable error on some components, close everything.
s := cons(t)
- require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Open(context.Background(), false))
+ require.NoError(t, s.Close())
})
t.Run("RO", func(t *testing.T) {
// Use-case: irrecoverable error on some components, close everything.
// Open in read-only must be done after the db is here.
s := cons(t)
- require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init(context.Background()))
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Open(context.Background(), false))
+ require.NoError(t, s.Init())
+ require.NoError(t, s.Close())
- require.NoError(t, s.Open(context.Background(), mode.ReadOnly))
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Open(context.Background(), true))
+ require.NoError(t, s.Close())
})
}
@@ -78,10 +78,10 @@ func TestCloseAfterOpen(t *testing.T, cons Constructor) {
func TestCloseTwice(t *testing.T, cons Constructor) {
// Use-case: move to maintenance mode twice, first time failed.
s := cons(t)
- require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init(context.Background()))
- require.NoError(t, s.Close(context.Background()))
- require.NoError(t, s.Close(context.Background())) // already closed, no-op
+ require.NoError(t, s.Open(context.Background(), false))
+ require.NoError(t, s.Init())
+ require.NoError(t, s.Close())
+ require.NoError(t, s.Close()) // already closed, no-op
}
// TestSetMode checks that any mode transition can be done safely.
@@ -90,33 +90,33 @@ func TestSetMode(t *testing.T, cons Constructor, m mode.Mode) {
// Use-case: metabase `Init` failed,
// call `SetMode` on all not-yet-initialized components.
s := cons(t)
- require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.SetMode(context.Background(), m))
+ require.NoError(t, s.Open(context.Background(), false))
+ require.NoError(t, s.SetMode(m))
t.Run("after open in RO", func(t *testing.T) {
- require.NoError(t, s.Close(context.Background()))
- require.NoError(t, s.Open(context.Background(), mode.ReadOnly))
- require.NoError(t, s.SetMode(context.Background(), m))
+ require.NoError(t, s.Close())
+ require.NoError(t, s.Open(context.Background(), true))
+ require.NoError(t, s.SetMode(m))
})
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Close())
})
t.Run("after init", func(t *testing.T) {
s := cons(t)
// Use-case: notmal node operation.
- require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init(context.Background()))
- require.NoError(t, s.SetMode(context.Background(), m))
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Open(context.Background(), false))
+ require.NoError(t, s.Init())
+ require.NoError(t, s.SetMode(m))
+ require.NoError(t, s.Close())
})
}
func TestModeTransition(t *testing.T, cons Constructor, from, to mode.Mode) {
// Use-case: normal node operation.
s := cons(t)
- require.NoError(t, s.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, s.Init(context.Background()))
- require.NoError(t, s.SetMode(context.Background(), from))
- require.NoError(t, s.SetMode(context.Background(), to))
- require.NoError(t, s.Close(context.Background()))
+ require.NoError(t, s.Open(context.Background(), false))
+ require.NoError(t, s.Init())
+ require.NoError(t, s.SetMode(from))
+ require.NoError(t, s.SetMode(to))
+ require.NoError(t, s.Close())
}
diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go
index 52b199b0b..383c596af 100644
--- a/pkg/local_object_storage/internal/testutil/generators.go
+++ b/pkg/local_object_storage/internal/testutil/generators.go
@@ -1,9 +1,7 @@
package testutil
import (
- cryptorand "crypto/rand"
"encoding/binary"
- "math/rand"
"sync/atomic"
"testing"
@@ -11,6 +9,7 @@ import (
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/rand"
)
// AddressGenerator is the interface of types that generate object addresses.
@@ -62,7 +61,7 @@ var _ ObjectGenerator = &SeqObjGenerator{}
func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object {
data := make([]byte, sz)
- _, _ = cryptorand.Read(data)
+ _, _ = rand.Read(data)
obj := GenerateObjectWithCIDWithPayload(cid, data)
obj.SetID(oid)
return obj
@@ -83,7 +82,7 @@ var _ ObjectGenerator = &RandObjGenerator{}
func (g *RandObjGenerator) Next() *objectSDK.Object {
var id oid.ID
- _, _ = cryptorand.Read(id[:])
+ _, _ = rand.Read(id[:])
return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize)
}
diff --git a/pkg/local_object_storage/internal/testutil/generators_test.go b/pkg/local_object_storage/internal/testutil/generators_test.go
index cc6f726a4..f2c60f9b4 100644
--- a/pkg/local_object_storage/internal/testutil/generators_test.go
+++ b/pkg/local_object_storage/internal/testutil/generators_test.go
@@ -2,10 +2,10 @@ package testutil
import (
"encoding/binary"
- "slices"
"testing"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
)
func TestOverwriteObjGenerator(t *testing.T) {
@@ -13,7 +13,7 @@ func TestOverwriteObjGenerator(t *testing.T) {
ObjSize: 10,
MaxObjects: 4,
}
- for range 40 {
+ for i := 0; i < 40; i++ {
obj := gen.Next()
id, isSet := obj.ID()
i := binary.LittleEndian.Uint64(id[:])
@@ -26,7 +26,7 @@ func TestOverwriteObjGenerator(t *testing.T) {
func TestRandObjGenerator(t *testing.T) {
gen := &RandObjGenerator{ObjSize: 10}
- for range 10 {
+ for i := 0; i < 10; i++ {
obj := gen.Next()
require.Equal(t, gen.ObjSize, uint64(len(obj.Payload())))
@@ -50,7 +50,7 @@ func TestSeqObjGenerator(t *testing.T) {
func TestRandAddrGenerator(t *testing.T) {
gen := RandAddrGenerator(5)
- for range 50 {
+ for i := 0; i < 50; i++ {
addr := gen.Next()
id := addr.Object()
k := binary.LittleEndian.Uint64(id[:])
diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go
index 1087e40be..573a099ff 100644
--- a/pkg/local_object_storage/internal/testutil/object.go
+++ b/pkg/local_object_storage/internal/testutil/object.go
@@ -1,7 +1,6 @@
package testutil
import (
- "crypto/rand"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@@ -12,6 +11,7 @@ import (
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "golang.org/x/exp/rand"
)
const defaultDataSize = 32
@@ -49,7 +49,6 @@ func GenerateObjectWithCIDWithPayload(cnr cid.ID, data []byte) *objectSDK.Object
obj.SetContainerID(cnr)
obj.SetVersion(&ver)
obj.SetPayload(data)
- obj.SetPayloadSize(uint64(len(data)))
obj.SetPayloadChecksum(csum)
obj.SetPayloadHomomorphicHash(csumTZ)
diff --git a/pkg/local_object_storage/metabase/VERSION.md b/pkg/local_object_storage/metabase/VERSION.md
index 9cfc95332..97e514db1 100644
--- a/pkg/local_object_storage/metabase/VERSION.md
+++ b/pkg/local_object_storage/metabase/VERSION.md
@@ -2,8 +2,6 @@
This file describes changes between the metabase versions.
-Warning: database schema below is outdated and incomplete, see source code.
-
## Current
### Primary buckets
@@ -88,11 +86,6 @@ Warning: database schema below is outdated and incomplete, see source code.
# History
-## Version 3
-
-- Payload hash, owner ID and FKBT buckets deleted
-- Expiration epoch to object ID and object ID to expiration epoch added
-
## Version 2
- Container ID is encoded as 32-byte slice
diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go
deleted file mode 100644
index de1479e6f..000000000
--- a/pkg/local_object_storage/metabase/bucket_cache.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package meta
-
-import (
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "go.etcd.io/bbolt"
-)
-
-type bucketCache struct {
- locked *bbolt.Bucket
- graveyard *bbolt.Bucket
- garbage *bbolt.Bucket
- expired map[cid.ID]*bbolt.Bucket
- primary map[cid.ID]*bbolt.Bucket
-}
-
-func newBucketCache() *bucketCache {
- return &bucketCache{}
-}
-
-func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
- if bc == nil {
- return tx.Bucket(bucketNameLocked)
- }
- return getBucket(&bc.locked, tx, bucketNameLocked)
-}
-
-func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
- if bc == nil {
- return tx.Bucket(graveyardBucketName)
- }
- return getBucket(&bc.graveyard, tx, graveyardBucketName)
-}
-
-func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
- if bc == nil {
- return tx.Bucket(garbageBucketName)
- }
- return getBucket(&bc.garbage, tx, garbageBucketName)
-}
-
-func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket {
- if *cache != nil {
- return *cache
- }
-
- *cache = tx.Bucket(name)
- return *cache
-}
-
-func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
- if bc == nil {
- bucketName := make([]byte, bucketKeySize)
- bucketName = objectToExpirationEpochBucketName(cnr, bucketName)
- return tx.Bucket(bucketName)
- }
- return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr)
-}
-
-func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
- if bc == nil {
- bucketName := make([]byte, bucketKeySize)
- bucketName = primaryBucketName(cnr, bucketName)
- return tx.Bucket(bucketName)
- }
- return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr)
-}
-
-func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket {
- value, ok := (*m)[cnr]
- if ok {
- return value
- }
-
- if *m == nil {
- *m = make(map[cid.ID]*bbolt.Bucket, 1)
- }
-
- bucketName := make([]byte, bucketKeySize)
- bucketName = nameFunc(cnr, bucketName)
- (*m)[cnr] = getBucket(&value, tx, bucketName)
- return value
-}
diff --git a/pkg/local_object_storage/metabase/children.go b/pkg/local_object_storage/metabase/children.go
index acd367951..6816358d2 100644
--- a/pkg/local_object_storage/metabase/children.go
+++ b/pkg/local_object_storage/metabase/children.go
@@ -69,6 +69,7 @@ func (db *DB) GetChildren(ctx context.Context, addresses []oid.Address) (map[oid
}
return nil
})
+
if err != nil {
return nil, metaerr.Wrap(err)
}
diff --git a/pkg/local_object_storage/metabase/containers.go b/pkg/local_object_storage/metabase/containers.go
index da27e6085..472b2affc 100644
--- a/pkg/local_object_storage/metabase/containers.go
+++ b/pkg/local_object_storage/metabase/containers.go
@@ -56,7 +56,7 @@ func (db *DB) containers(tx *bbolt.Tx) ([]cid.ID, error) {
return result, err
}
-func (db *DB) ContainerSize(id cid.ID) (uint64, error) {
+func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -64,22 +64,21 @@ func (db *DB) ContainerSize(id cid.ID) (uint64, error) {
return 0, ErrDegradedMode
}
- var size uint64
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- size = db.containerSize(tx, id)
+ err = db.boltDB.View(func(tx *bbolt.Tx) error {
+ size, err = db.containerSize(tx, id)
- return nil
+ return err
})
return size, metaerr.Wrap(err)
}
-func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) uint64 {
+func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) (uint64, error) {
containerVolume := tx.Bucket(containerVolumeBucketName)
key := make([]byte, cidSize)
id.Encode(key)
- return parseContainerSize(containerVolume.Get(key))
+ return parseContainerSize(containerVolume.Get(key)), nil
}
func parseContainerID(dst *cid.ID, name []byte, ignore map[string]struct{}) bool {
diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go
index 8d8d91dc7..4e2dd550d 100644
--- a/pkg/local_object_storage/metabase/containers_test.go
+++ b/pkg/local_object_storage/metabase/containers_test.go
@@ -3,6 +3,7 @@ package meta_test
import (
"context"
"math/rand"
+ "sort"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -18,13 +19,12 @@ func TestDB_Containers(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
const N = 10
cids := make(map[string]int, N)
- for range N {
+ for i := 0; i < N; i++ {
obj := testutil.GenerateObject()
cnr, _ := obj.ContainerID()
@@ -67,7 +67,24 @@ func TestDB_Containers(t *testing.T) {
assertContains(cnrs, cnr)
- require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.ID()))
+ require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.Address()))
+
+ cnrs, err = db.Containers(context.Background())
+ require.NoError(t, err)
+ assertContains(cnrs, cnr)
+ })
+
+ t.Run("ToMoveIt", func(t *testing.T) {
+ obj := testutil.GenerateObject()
+
+ require.NoError(t, putBig(db, obj))
+
+ cnrs, err := db.Containers(context.Background())
+ require.NoError(t, err)
+ cnr, _ := obj.ContainerID()
+ assertContains(cnrs, cnr)
+
+ require.NoError(t, metaToMoveIt(db, object.AddressOf(obj)))
cnrs, err = db.Containers(context.Background())
require.NoError(t, err)
@@ -79,7 +96,6 @@ func TestDB_ContainersCount(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
const R, T, SG, L = 10, 11, 12, 13 // amount of object per type
@@ -95,7 +111,7 @@ func TestDB_ContainersCount(t *testing.T) {
expected := make([]cid.ID, 0, R+T+SG+L)
for _, upload := range uploadObjects {
- for range upload.amount {
+ for i := 0; i < upload.amount; i++ {
obj := testutil.GenerateObject()
obj.SetType(upload.typ)
@@ -107,16 +123,24 @@ func TestDB_ContainersCount(t *testing.T) {
}
}
+ sort.Slice(expected, func(i, j int) bool {
+ return expected[i].EncodeToString() < expected[j].EncodeToString()
+ })
+
got, err := db.Containers(context.Background())
require.NoError(t, err)
- require.ElementsMatch(t, expected, got)
+
+ sort.Slice(got, func(i, j int) bool {
+ return got[i].EncodeToString() < got[j].EncodeToString()
+ })
+
+ require.Equal(t, expected, got)
}
func TestDB_ContainerSize(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
const (
C = 3
@@ -126,11 +150,11 @@ func TestDB_ContainerSize(t *testing.T) {
cids := make(map[cid.ID]int, C)
objs := make(map[cid.ID][]*objectSDK.Object, C*N)
- for range C {
+ for i := 0; i < C; i++ {
cnr := cidtest.ID()
cids[cnr] = 0
- for range N {
+ for j := 0; j < N; j++ {
size := rand.Intn(1024)
parent := testutil.GenerateObjectWithCID(cnr)
@@ -164,7 +188,7 @@ func TestDB_ContainerSize(t *testing.T) {
require.NoError(t, metaInhume(
db,
object.AddressOf(obj),
- oidtest.ID(),
+ oidtest.Address(),
))
volume -= int(obj.PayloadSize())
diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go
index c19c65224..3f155eeb5 100644
--- a/pkg/local_object_storage/metabase/control.go
+++ b/pkg/local_object_storage/metabase/control.go
@@ -21,66 +21,37 @@ var ErrDegradedMode = logicerr.New("metabase is in a degraded mode")
// ErrReadOnlyMode is returned when metabase is in a read-only mode.
var ErrReadOnlyMode = logicerr.New("metabase is in a read-only mode")
-var (
- mStaticBuckets = map[string]struct{}{
- string(containerVolumeBucketName): {},
- string(containerCounterBucketName): {},
- string(graveyardBucketName): {},
- string(garbageBucketName): {},
- string(shardInfoBucket): {},
- string(bucketNameLocked): {},
- string(expEpochToObjectBucketName): {},
- }
-
- // deprecatedBuckets buckets that are not used anymore.
- deprecatedBuckets = [][]byte{
- toMoveItBucketName,
- }
-)
-
// Open boltDB instance for metabase.
-func (db *DB) Open(ctx context.Context, m mode.Mode) error {
- db.modeMtx.Lock()
- defer db.modeMtx.Unlock()
- db.mode = m
- db.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
-
- if m.NoMetabase() {
- return nil
- }
- return db.openDB(ctx, m)
-}
-
-func (db *DB) openDB(ctx context.Context, mode mode.Mode) error {
+func (db *DB) Open(_ context.Context, readOnly bool) error {
err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission)
if err != nil {
- return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err)
+ return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
}
- db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
+ db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
if db.boltOptions == nil {
opts := *bbolt.DefaultOptions
db.boltOptions = &opts
}
- db.boltOptions.ReadOnly = mode.ReadOnly()
+ db.boltOptions.ReadOnly = readOnly
- return metaerr.Wrap(db.openBolt(ctx))
+ return metaerr.Wrap(db.openBolt())
}
-func (db *DB) openBolt(ctx context.Context) error {
+func (db *DB) openBolt() error {
var err error
db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions)
if err != nil {
- return fmt.Errorf("open boltDB database: %w", err)
+ return fmt.Errorf("can't open boltDB database: %w", err)
}
db.boltDB.MaxBatchDelay = db.boltBatchDelay
db.boltDB.MaxBatchSize = db.boltBatchSize
- db.log.Debug(ctx, logs.MetabaseOpenedBoltDBInstanceForMetabase)
+ db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase)
- db.log.Debug(ctx, logs.MetabaseCheckingMetabaseVersion)
+ db.log.Debug(logs.MetabaseCheckingMetabaseVersion)
return db.boltDB.View(func(tx *bbolt.Tx) error {
// The safest way to check if the metabase is fresh is to check if it has no buckets.
// However, shard info can be present. So here we check that the number of buckets is
@@ -109,7 +80,7 @@ func (db *DB) openBolt(ctx context.Context) error {
//
// Does nothing if metabase has already been initialized and filled. To roll back the database to its initial state,
// use Reset.
-func (db *DB) Init(_ context.Context) error {
+func (db *DB) Init() error {
return metaerr.Wrap(db.init(false))
}
@@ -131,6 +102,15 @@ func (db *DB) init(reset bool) error {
return nil
}
+ mStaticBuckets := map[string]struct{}{
+ string(containerVolumeBucketName): {},
+ string(graveyardBucketName): {},
+ string(toMoveItBucketName): {},
+ string(garbageBucketName): {},
+ string(shardInfoBucket): {},
+ string(bucketNameLocked): {},
+ }
+
return db.boltDB.Update(func(tx *bbolt.Tx) error {
var err error
if !reset {
@@ -145,43 +125,34 @@ func (db *DB) init(reset bool) error {
if reset {
err := tx.DeleteBucket(name)
if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
- return fmt.Errorf("delete static bucket %s: %w", k, err)
+ return fmt.Errorf("could not delete static bucket %s: %w", k, err)
}
}
_, err := tx.CreateBucketIfNotExists(name)
if err != nil {
- return fmt.Errorf("create static bucket %s: %w", k, err)
+ return fmt.Errorf("could not create static bucket %s: %w", k, err)
}
}
- for _, b := range deprecatedBuckets {
- err := tx.DeleteBucket(b)
- if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
- return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err)
- }
- }
-
- if !reset { // counters will be recalculated by refill metabase
+ if !reset {
err = syncCounter(tx, false)
if err != nil {
- return fmt.Errorf("sync object counter: %w", err)
+ return fmt.Errorf("could not sync object counter: %w", err)
}
return nil
}
- bucketCursor := tx.Cursor()
- name, _ := bucketCursor.First()
- for name != nil {
+ err = tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
if _, ok := mStaticBuckets[string(name)]; !ok {
- if err := tx.DeleteBucket(name); err != nil {
- return err
- }
- name, _ = bucketCursor.Seek(name)
- continue
+ return tx.DeleteBucket(name)
}
- name, _ = bucketCursor.Next()
+
+ return nil
+ })
+ if err != nil {
+ return err
}
return updateVersion(tx, version)
})
@@ -203,12 +174,11 @@ func (db *DB) SyncCounters() error {
}))
}
-// Close closes boltDB instance
-// and reports metabase metric.
-func (db *DB) Close(context.Context) error {
+// Close closes boltDB instance.
+func (db *DB) Close() error {
var err error
if db.boltDB != nil {
- err = db.close()
+ err = metaerr.Wrap(db.boltDB.Close())
}
if err == nil {
db.metrics.Close()
@@ -216,17 +186,13 @@ func (db *DB) Close(context.Context) error {
return err
}
-func (db *DB) close() error {
- return metaerr.Wrap(db.boltDB.Close())
-}
-
// Reload reloads part of the configuration.
// It returns true iff database was reopened.
// If a config option is invalid, it logs an error and returns nil.
// If there was a problem with applying new configuration, an error is returned.
//
// If a metabase was couldn't be reopened because of an error, ErrDegradedMode is returned.
-func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) {
+func (db *DB) Reload(opts ...Option) (bool, error) {
var c cfg
for i := range opts {
opts[i](&c)
@@ -236,19 +202,19 @@ func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) {
defer db.modeMtx.Unlock()
if db.mode.NoMetabase() || c.info.Path != "" && filepath.Clean(db.info.Path) != filepath.Clean(c.info.Path) {
- if err := db.Close(ctx); err != nil {
+ if err := db.Close(); err != nil {
return false, err
}
- db.mode = mode.Disabled
- db.metrics.SetMode(mode.ComponentDisabled)
+ db.mode = mode.Degraded
+ db.metrics.SetMode(mode.Degraded)
db.info.Path = c.info.Path
- if err := db.openBolt(ctx); err != nil {
+ if err := db.openBolt(); err != nil {
return false, metaerr.Wrap(fmt.Errorf("%w: %v", ErrDegradedMode, err))
}
db.mode = mode.ReadWrite
- db.metrics.SetMode(mode.ComponentReadWrite)
+ db.metrics.SetMode(mode.ReadWrite)
return true, nil
}
diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go
index d26402675..8f36423fd 100644
--- a/pkg/local_object_storage/metabase/control_test.go
+++ b/pkg/local_object_storage/metabase/control_test.go
@@ -15,7 +15,6 @@ import (
func TestReset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
err := db.Reset()
require.NoError(t, err)
@@ -41,7 +40,7 @@ func TestReset(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
- err = metaInhume(db, addrToInhume, oidtest.ID())
+ err = metaInhume(db, addrToInhume, oidtest.Address())
require.NoError(t, err)
assertExists(addr, true, nil)
diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go
index 732f99519..c0dc7886e 100644
--- a/pkg/local_object_storage/metabase/counter.go
+++ b/pkg/local_object_storage/metabase/counter.go
@@ -1,33 +1,17 @@
package meta
import (
- "bytes"
- "context"
"encoding/binary"
- "errors"
"fmt"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
)
-var (
- objectPhyCounterKey = []byte("phy_counter")
- objectLogicCounterKey = []byte("logic_counter")
- objectUserCounterKey = []byte("user_counter")
-)
-
-var (
- errInvalidKeyLenght = errors.New("invalid key length")
- errInvalidValueLenght = errors.New("invalid value length")
-)
+var objectPhyCounterKey = []byte("phy_counter")
+var objectLogicCounterKey = []byte("logic_counter")
type objectType uint8
@@ -35,19 +19,23 @@ const (
_ objectType = iota
phy
logical
- user
)
// ObjectCounters groups object counter
// according to metabase state.
type ObjectCounters struct {
- Logic uint64
- Phy uint64
- User uint64
+ logic uint64
+ phy uint64
}
-func (o ObjectCounters) IsZero() bool {
- return o.Phy == 0 && o.Logic == 0 && o.User == 0
+// Logic returns logical object counter.
+func (o ObjectCounters) Logic() uint64 {
+ return o.logic
+}
+
+// Phy returns physical object counter.
+func (o ObjectCounters) Phy() uint64 {
+ return o.phy
}
// ObjectCounters returns object counters that metabase has
@@ -68,17 +56,12 @@ func (db *DB) ObjectCounters() (cc ObjectCounters, err error) {
if b != nil {
data := b.Get(objectPhyCounterKey)
if len(data) == 8 {
- cc.Phy = binary.LittleEndian.Uint64(data)
+ cc.phy = binary.LittleEndian.Uint64(data)
}
data = b.Get(objectLogicCounterKey)
if len(data) == 8 {
- cc.Logic = binary.LittleEndian.Uint64(data)
- }
-
- data = b.Get(objectUserCounterKey)
- if len(data) == 8 {
- cc.User = binary.LittleEndian.Uint64(data)
+ cc.logic = binary.LittleEndian.Uint64(data)
}
}
@@ -88,179 +71,14 @@ func (db *DB) ObjectCounters() (cc ObjectCounters, err error) {
return cc, metaerr.Wrap(err)
}
-type ContainerCounters struct {
- Counts map[cid.ID]ObjectCounters
-}
-
-// ContainerCounters returns object counters for each container
-// that metabase has tracked since it was opened and initialized.
-//
-// Returns only the errors that do not allow reading counter
-// in Bolt database.
-//
-// It is guaranteed that the ContainerCounters fields are not nil.
-func (db *DB) ContainerCounters(ctx context.Context) (ContainerCounters, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("ContainerCounters", time.Since(startedAt), success)
- }()
-
- ctx, span := tracing.StartSpanFromContext(ctx, "metabase.ContainerCounters")
- defer span.End()
-
- cc := ContainerCounters{
- Counts: make(map[cid.ID]ObjectCounters),
- }
-
- lastKey := make([]byte, cidSize)
-
- // there is no limit for containers count, so use batching with cancellation
- for {
- select {
- case <-ctx.Done():
- return cc, ctx.Err()
- default:
- }
-
- completed, err := db.containerCountersNextBatch(lastKey, func(id cid.ID, entity ObjectCounters) {
- cc.Counts[id] = entity
- })
- if err != nil {
- return cc, err
- }
- if completed {
- break
- }
- }
-
- success = true
- return cc, nil
-}
-
-func (db *DB) containerCountersNextBatch(lastKey []byte, f func(id cid.ID, entity ObjectCounters)) (bool, error) {
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return false, ErrDegradedMode
- }
-
- counter := 0
- const batchSize = 1000
-
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(containerCounterBucketName)
- if b == nil {
- return ErrInterruptIterator
- }
- c := b.Cursor()
- var key, value []byte
- for key, value = c.Seek(lastKey); key != nil; key, value = c.Next() {
- if bytes.Equal(lastKey, key) {
- continue
- }
- copy(lastKey, key)
-
- cnrID, err := parseContainerCounterKey(key)
- if err != nil {
- return err
- }
- ent, err := parseContainerCounterValue(value)
- if err != nil {
- return err
- }
- f(cnrID, ent)
-
- counter++
- if counter == batchSize {
- break
- }
- }
-
- if counter < batchSize { // last batch
- return ErrInterruptIterator
- }
- return nil
- })
- if err != nil {
- if errors.Is(err, ErrInterruptIterator) {
- return true, nil
- }
- return false, metaerr.Wrap(err)
- }
- return false, nil
-}
-
-func (db *DB) ContainerCount(ctx context.Context, id cid.ID) (ObjectCounters, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("ContainerCount", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "metabase.ContainerCount")
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ObjectCounters{}, ErrDegradedMode
- }
-
- var result ObjectCounters
-
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(containerCounterBucketName)
- key := make([]byte, cidSize)
- id.Encode(key)
- v := b.Get(key)
- if v == nil {
- return nil
- }
- var err error
- result, err = parseContainerCounterValue(v)
- return err
- })
-
- return result, metaerr.Wrap(err)
-}
-
-func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
- b := tx.Bucket(shardInfoBucket)
- if b == nil {
- return db.incContainerObjectCounter(tx, cnrID, isUserObject)
- }
-
- if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil {
- return fmt.Errorf("increase phy object counter: %w", err)
- }
- if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil {
- return fmt.Errorf("increase logical object counter: %w", err)
- }
- if isUserObject {
- if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil {
- return fmt.Errorf("increase user object counter: %w", err)
- }
- }
- return db.incContainerObjectCounter(tx, cnrID, isUserObject)
-}
-
-func (db *DB) decShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64) error {
+// updateCounter updates the object counter. Tx MUST be writable.
+// If inc == `true`, increases the counter, decreases otherwise.
+func (db *DB) updateCounter(tx *bbolt.Tx, typ objectType, delta uint64, inc bool) error {
b := tx.Bucket(shardInfoBucket)
if b == nil {
return nil
}
- return db.updateShardObjectCounterBucket(b, typ, delta, false)
-}
-
-func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error {
var counter uint64
var counterKey []byte
@@ -269,8 +87,6 @@ func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta
counterKey = objectPhyCounterKey
case logical:
counterKey = objectLogicCounterKey
- case user:
- counterKey = objectUserCounterKey
default:
panic("unknown object type counter")
}
@@ -294,65 +110,6 @@ func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta
return b.Put(counterKey, newCounter)
}
-func (db *DB) updateContainerCounter(tx *bbolt.Tx, delta map[cid.ID]ObjectCounters, inc bool) error {
- b := tx.Bucket(containerCounterBucketName)
- if b == nil {
- return nil
- }
-
- key := make([]byte, cidSize)
- for cnrID, cnrDelta := range delta {
- cnrID.Encode(key)
- if err := db.editContainerCounterValue(b, key, cnrDelta, inc); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (*DB) editContainerCounterValue(b *bbolt.Bucket, key []byte, delta ObjectCounters, inc bool) error {
- var entity ObjectCounters
- var err error
- data := b.Get(key)
- if len(data) > 0 {
- entity, err = parseContainerCounterValue(data)
- if err != nil {
- return err
- }
- }
- entity.Phy = nextValue(entity.Phy, delta.Phy, inc)
- entity.Logic = nextValue(entity.Logic, delta.Logic, inc)
- entity.User = nextValue(entity.User, delta.User, inc)
- value := containerCounterValue(entity)
- return b.Put(key, value)
-}
-
-func nextValue(existed, delta uint64, inc bool) uint64 {
- if inc {
- existed += delta
- } else if existed <= delta {
- existed = 0
- } else {
- existed -= delta
- }
- return existed
-}
-
-func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
- b := tx.Bucket(containerCounterBucketName)
- if b == nil {
- return nil
- }
-
- key := make([]byte, cidSize)
- cnrID.Encode(key)
- c := ObjectCounters{Logic: 1, Phy: 1}
- if isUserObject {
- c.User = 1
- }
- return db.editContainerCounterValue(b, key, c, true)
-}
-
// syncCounter updates object counters according to metabase state:
// it counts all the physically/logically stored objects using internal
// indexes. Tx MUST be writable.
@@ -360,390 +117,57 @@ func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject
// Does nothing if counters are not empty and force is false. If force is
// true, updates the counters anyway.
func syncCounter(tx *bbolt.Tx, force bool) error {
- shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket)
+ b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
if err != nil {
- return fmt.Errorf("get shard info bucket: %w", err)
+ return fmt.Errorf("could not get shard info bucket: %w", err)
}
- shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 &&
- len(shardInfoB.Get(objectLogicCounterKey)) == 8 &&
- len(shardInfoB.Get(objectUserCounterKey)) == 8
- containerObjectCounterInitialized := containerObjectCounterInitialized(tx)
- if !force && shardObjectCounterInitialized && containerObjectCounterInitialized {
+
+ if !force && len(b.Get(objectPhyCounterKey)) == 8 && len(b.Get(objectLogicCounterKey)) == 8 {
// the counters are already inited
return nil
}
- containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName)
- if err != nil {
- return fmt.Errorf("get container counter bucket: %w", err)
- }
-
var addr oid.Address
- counters := make(map[cid.ID]ObjectCounters)
+ var phyCounter uint64
+ var logicCounter uint64
graveyardBKT := tx.Bucket(graveyardBucketName)
garbageBKT := tx.Bucket(garbageBucketName)
key := make([]byte, addressKeySize)
- var isAvailable bool
- err = iteratePhyObjects(tx, func(cnr cid.ID, objID oid.ID, obj *objectSDK.Object) error {
- if v, ok := counters[cnr]; ok {
- v.Phy++
- counters[cnr] = v
- } else {
- counters[cnr] = ObjectCounters{
- Phy: 1,
- }
- }
+ err = iteratePhyObjects(tx, func(cnr cid.ID, obj oid.ID) error {
+ phyCounter++
addr.SetContainer(cnr)
- addr.SetObject(objID)
- isAvailable = false
+ addr.SetObject(obj)
// check if an object is available: not with GCMark
// and not covered with a tombstone
if inGraveyardWithKey(addressKey(addr, key), graveyardBKT, garbageBKT) == 0 {
- if v, ok := counters[cnr]; ok {
- v.Logic++
- counters[cnr] = v
- } else {
- counters[cnr] = ObjectCounters{
- Logic: 1,
- }
- }
- isAvailable = true
- }
-
- if isAvailable && IsUserObject(obj) {
- if v, ok := counters[cnr]; ok {
- v.User++
- counters[cnr] = v
- } else {
- counters[cnr] = ObjectCounters{
- User: 1,
- }
- }
+ logicCounter++
}
return nil
})
if err != nil {
- return fmt.Errorf("iterate objects: %w", err)
+ return fmt.Errorf("could not iterate objects: %w", err)
}
- return setObjectCounters(counters, shardInfoB, containerCounterB)
-}
+ data := make([]byte, 8)
+ binary.LittleEndian.PutUint64(data, phyCounter)
-func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, containerCounterB *bbolt.Bucket) error {
- var phyTotal uint64
- var logicTotal uint64
- var userTotal uint64
- key := make([]byte, cidSize)
- for cnrID, count := range counters {
- phyTotal += count.Phy
- logicTotal += count.Logic
- userTotal += count.User
-
- cnrID.Encode(key)
- value := containerCounterValue(count)
- err := containerCounterB.Put(key, value)
- if err != nil {
- return fmt.Errorf("update phy container object counter: %w", err)
- }
- }
- phyData := make([]byte, 8)
- binary.LittleEndian.PutUint64(phyData, phyTotal)
-
- err := shardInfoB.Put(objectPhyCounterKey, phyData)
+ err = b.Put(objectPhyCounterKey, data)
if err != nil {
- return fmt.Errorf("update phy object counter: %w", err)
+ return fmt.Errorf("could not update phy object counter: %w", err)
}
- logData := make([]byte, 8)
- binary.LittleEndian.PutUint64(logData, logicTotal)
+ data = make([]byte, 8)
+ binary.LittleEndian.PutUint64(data, logicCounter)
- err = shardInfoB.Put(objectLogicCounterKey, logData)
+ err = b.Put(objectLogicCounterKey, data)
if err != nil {
- return fmt.Errorf("update logic object counter: %w", err)
- }
-
- userData := make([]byte, 8)
- binary.LittleEndian.PutUint64(userData, userTotal)
-
- err = shardInfoB.Put(objectUserCounterKey, userData)
- if err != nil {
- return fmt.Errorf("update user object counter: %w", err)
+ return fmt.Errorf("could not update logic object counter: %w", err)
}
return nil
}
-
-func containerCounterValue(entity ObjectCounters) []byte {
- res := make([]byte, 24)
- binary.LittleEndian.PutUint64(res, entity.Phy)
- binary.LittleEndian.PutUint64(res[8:], entity.Logic)
- binary.LittleEndian.PutUint64(res[16:], entity.User)
- return res
-}
-
-func parseContainerCounterKey(buf []byte) (cid.ID, error) {
- if len(buf) != cidSize {
- return cid.ID{}, errInvalidKeyLenght
- }
- var cnrID cid.ID
- if err := cnrID.Decode(buf); err != nil {
- return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
- }
- return cnrID, nil
-}
-
-// parseContainerCounterValue return phy, logic values.
-func parseContainerCounterValue(buf []byte) (ObjectCounters, error) {
- if len(buf) != 24 {
- return ObjectCounters{}, errInvalidValueLenght
- }
- return ObjectCounters{
- Phy: binary.LittleEndian.Uint64(buf),
- Logic: binary.LittleEndian.Uint64(buf[8:16]),
- User: binary.LittleEndian.Uint64(buf[16:]),
- }, nil
-}
-
-func containerObjectCounterInitialized(tx *bbolt.Tx) bool {
- b := tx.Bucket(containerCounterBucketName)
- if b == nil {
- return false
- }
- k, v := b.Cursor().First()
- if k == nil && v == nil {
- return true
- }
- _, err := parseContainerCounterKey(k)
- if err != nil {
- return false
- }
- _, err = parseContainerCounterValue(v)
- return err == nil
-}
-
-func IsUserObject(obj *objectSDK.Object) bool {
- ech := obj.ECHeader()
- if ech == nil {
- _, hasParentID := obj.ParentID()
- return obj.Type() == objectSDK.TypeRegular &&
- (obj.SplitID() == nil ||
- (hasParentID && len(obj.Children()) == 0))
- }
- return ech.Index() == 0 && (ech.ParentSplitID() == nil || ech.ParentSplitParentID() != nil)
-}
-
-// ZeroSizeContainers returns containers with size = 0.
-func (db *DB) ZeroSizeContainers(ctx context.Context) ([]cid.ID, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("ZeroSizeContainers", time.Since(startedAt), success)
- }()
-
- ctx, span := tracing.StartSpanFromContext(ctx, "metabase.ZeroSizeContainers")
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- var result []cid.ID
- lastKey := make([]byte, cidSize)
-
- for {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
-
- completed, err := db.containerSizesNextBatch(lastKey, func(contID cid.ID, size uint64) {
- if size == 0 {
- result = append(result, contID)
- }
- })
- if err != nil {
- return nil, err
- }
- if completed {
- break
- }
- }
-
- success = true
- return result, nil
-}
-
-func (db *DB) containerSizesNextBatch(lastKey []byte, f func(cid.ID, uint64)) (bool, error) {
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return false, ErrDegradedMode
- }
-
- counter := 0
- const batchSize = 1000
-
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(containerVolumeBucketName)
- c := b.Cursor()
- var key, value []byte
- for key, value = c.Seek(lastKey); key != nil; key, value = c.Next() {
- if bytes.Equal(lastKey, key) {
- continue
- }
- copy(lastKey, key)
-
- size := parseContainerSize(value)
- var id cid.ID
- if err := id.Decode(key); err != nil {
- return err
- }
- f(id, size)
-
- counter++
- if counter == batchSize {
- break
- }
- }
-
- if counter < batchSize {
- return ErrInterruptIterator
- }
- return nil
- })
- if err != nil {
- if errors.Is(err, ErrInterruptIterator) {
- return true, nil
- }
- return false, metaerr.Wrap(err)
- }
- return false, nil
-}
-
-func (db *DB) DeleteContainerSize(ctx context.Context, id cid.ID) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("DeleteContainerSize", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "metabase.DeleteContainerSize",
- trace.WithAttributes(
- attribute.Stringer("container_id", id),
- ))
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- if db.mode.ReadOnly() {
- return ErrReadOnlyMode
- }
-
- err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(containerVolumeBucketName)
-
- key := make([]byte, cidSize)
- id.Encode(key)
- return b.Delete(key)
- })
- success = err == nil
- return metaerr.Wrap(err)
-}
-
-// ZeroCountContainers returns containers with objects count = 0 in metabase.
-func (db *DB) ZeroCountContainers(ctx context.Context) ([]cid.ID, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("ZeroCountContainers", time.Since(startedAt), success)
- }()
-
- ctx, span := tracing.StartSpanFromContext(ctx, "metabase.ZeroCountContainers")
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return nil, ErrDegradedMode
- }
-
- var result []cid.ID
-
- lastKey := make([]byte, cidSize)
- for {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
-
- completed, err := db.containerCountersNextBatch(lastKey, func(id cid.ID, entity ObjectCounters) {
- if entity.IsZero() {
- result = append(result, id)
- }
- })
- if err != nil {
- return nil, metaerr.Wrap(err)
- }
- if completed {
- break
- }
- }
- success = true
- return result, nil
-}
-
-func (db *DB) DeleteContainerCount(ctx context.Context, id cid.ID) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("DeleteContainerCount", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "metabase.DeleteContainerCount",
- trace.WithAttributes(
- attribute.Stringer("container_id", id),
- ))
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- if db.mode.ReadOnly() {
- return ErrReadOnlyMode
- }
-
- err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(containerCounterBucketName)
-
- key := make([]byte, cidSize)
- id.Encode(key)
- return b.Delete(key)
- })
- success = err == nil
- return metaerr.Wrap(err)
-}
diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go
index 950385a29..89b52c887 100644
--- a/pkg/local_object_storage/metabase/counter_test.go
+++ b/pkg/local_object_storage/metabase/counter_test.go
@@ -7,7 +7,6 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -22,117 +21,62 @@ func TestCounters(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Zero(t, c.Phy)
- require.Zero(t, c.Logic)
- require.Zero(t, c.User)
-
- cc, err := db.ContainerCounters(context.Background())
- require.NoError(t, err)
- require.Zero(t, len(cc.Counts))
+ require.Zero(t, c.Phy())
+ require.Zero(t, c.Logic())
})
t.Run("put", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := make([]*objectSDK.Object, 0, objCount)
- for range objCount {
+ for i := 0; i < objCount; i++ {
oo = append(oo, testutil.GenerateObject())
}
var prm meta.PutPrm
- exp := make(map[cid.ID]meta.ObjectCounters)
- for i := range objCount {
+ for i := 0; i < objCount; i++ {
prm.SetObject(oo[i])
- cnrID, _ := oo[i].ContainerID()
- c := meta.ObjectCounters{}
- exp[cnrID] = meta.ObjectCounters{
- Logic: 1,
- Phy: 1,
- User: 1,
- }
_, err := db.Put(context.Background(), prm)
require.NoError(t, err)
- c, err = db.ObjectCounters()
+ c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(i+1), c.Phy)
- require.Equal(t, uint64(i+1), c.Logic)
-
- cc, err := db.ContainerCounters(context.Background())
- require.NoError(t, err)
-
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+ require.Equal(t, uint64(i+1), c.Phy())
+ require.Equal(t, uint64(i+1), c.Logic())
}
})
t.Run("delete", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, false)
- exp := make(map[cid.ID]meta.ObjectCounters)
- for _, obj := range oo {
- cnrID, _ := obj.ContainerID()
- exp[cnrID] = meta.ObjectCounters{
- Logic: 1,
- Phy: 1,
- User: 1,
- }
- }
-
var prm meta.DeletePrm
for i := objCount - 1; i >= 0; i-- {
prm.SetAddresses(objectcore.AddressOf(oo[i]))
res, err := db.Delete(context.Background(), prm)
require.NoError(t, err)
- require.Equal(t, uint64(1), res.LogicCount())
+ require.Equal(t, uint64(1), res.AvailableObjectsRemoved())
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(i), c.Phy)
- require.Equal(t, uint64(i), c.Logic)
- require.Equal(t, uint64(i), c.User)
-
- cnrID, _ := oo[i].ContainerID()
- if v, ok := exp[cnrID]; ok {
- v.Phy--
- v.Logic--
- v.User--
- exp[cnrID] = v
- }
-
- cc, err := db.ContainerCounters(context.Background())
- require.NoError(t, err)
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+ require.Equal(t, uint64(i), c.Phy())
+ require.Equal(t, uint64(i), c.Logic())
}
})
t.Run("inhume", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, false)
- exp := make(map[cid.ID]meta.ObjectCounters)
- for _, obj := range oo {
- cnrID, _ := obj.ContainerID()
- exp[cnrID] = meta.ObjectCounters{
- Logic: 1,
- Phy: 1,
- User: 1,
- }
- }
-
inhumedObjs := make([]oid.Address, objCount/2)
for i, o := range oo {
@@ -143,141 +87,66 @@ func TestCounters(t *testing.T) {
inhumedObjs[i] = objectcore.AddressOf(o)
}
- for _, addr := range inhumedObjs {
- if v, ok := exp[addr.Container()]; ok {
- v.Logic--
- v.User--
- if v.IsZero() {
- delete(exp, addr.Container())
- } else {
- exp[addr.Container()] = v
- }
- }
- }
-
var prm meta.InhumePrm
- for _, o := range inhumedObjs {
- tombAddr := oidtest.Address()
- tombAddr.SetContainer(o.Container())
+ prm.SetTombstoneAddress(oidtest.Address())
+ prm.SetAddresses(inhumedObjs...)
- prm.SetTombstoneAddress(tombAddr)
- prm.SetAddresses(o)
-
- res, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
- require.Equal(t, uint64(1), res.LogicInhumed())
- require.Equal(t, uint64(1), res.UserInhumed())
- }
+ res, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(len(inhumedObjs)), res.AvailableInhumed())
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(objCount), c.Phy)
- require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic)
- require.Equal(t, uint64(objCount-len(inhumedObjs)), c.User)
-
- cc, err := db.ContainerCounters(context.Background())
- require.NoError(t, err)
-
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+ require.Equal(t, uint64(objCount), c.Phy())
+ require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic())
})
t.Run("put_split", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
parObj := testutil.GenerateObject()
- exp := make(map[cid.ID]meta.ObjectCounters)
-
// put objects and check that parent info
// does not affect the counter
- for i := range objCount {
+ for i := 0; i < objCount; i++ {
o := testutil.GenerateObject()
if i < objCount/2 { // half of the objs will have the parent
o.SetParent(parObj)
- o.SetSplitID(objectSDK.NewSplitID())
- }
-
- cnrID, _ := o.ContainerID()
- exp[cnrID] = meta.ObjectCounters{
- Logic: 1,
- Phy: 1,
- User: 1,
}
require.NoError(t, putBig(db, o))
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(i+1), c.Phy)
- require.Equal(t, uint64(i+1), c.Logic)
- require.Equal(t, uint64(i+1), c.User)
-
- cc, err := db.ContainerCounters(context.Background())
- require.NoError(t, err)
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+ require.Equal(t, uint64(i+1), c.Phy())
+ require.Equal(t, uint64(i+1), c.Logic())
}
})
t.Run("delete_split", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, true)
- exp := make(map[cid.ID]meta.ObjectCounters)
- for _, obj := range oo {
- cnrID, _ := obj.ContainerID()
- exp[cnrID] = meta.ObjectCounters{
- Logic: 1,
- Phy: 1,
- User: 1,
- }
- }
-
// delete objects that have parent info
// and check that it does not affect
// the counter
for i, o := range oo {
- addr := objectcore.AddressOf(o)
- require.NoError(t, metaDelete(db, addr))
+ require.NoError(t, metaDelete(db, objectcore.AddressOf(o)))
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(objCount-i-1), c.Phy)
- require.Equal(t, uint64(objCount-i-1), c.Logic)
- require.Equal(t, uint64(objCount-i-1), c.User)
-
- if v, ok := exp[addr.Container()]; ok {
- v.Logic--
- v.Phy--
- v.User--
- if v.IsZero() {
- delete(exp, addr.Container())
- } else {
- exp[addr.Container()] = v
- }
- }
+ require.Equal(t, uint64(objCount-i-1), c.Phy())
+ require.Equal(t, uint64(objCount-i-1), c.Logic())
}
})
t.Run("inhume_split", func(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := putObjs(t, db, objCount, true)
- exp := make(map[cid.ID]meta.ObjectCounters)
- for _, obj := range oo {
- cnrID, _ := obj.ContainerID()
- exp[cnrID] = meta.ObjectCounters{
- Logic: 1,
- Phy: 1,
- User: 1,
- }
- }
-
inhumedObjs := make([]oid.Address, objCount/2)
for i, o := range oo {
@@ -288,93 +157,21 @@ func TestCounters(t *testing.T) {
inhumedObjs[i] = objectcore.AddressOf(o)
}
- for _, addr := range inhumedObjs {
- if v, ok := exp[addr.Container()]; ok {
- v.Logic--
- v.User--
- if v.IsZero() {
- delete(exp, addr.Container())
- } else {
- exp[addr.Container()] = v
- }
- }
- }
-
var prm meta.InhumePrm
- for _, o := range inhumedObjs {
- tombAddr := oidtest.Address()
- tombAddr.SetContainer(o.Container())
+ prm.SetTombstoneAddress(oidtest.Address())
+ prm.SetAddresses(inhumedObjs...)
- prm.SetTombstoneAddress(tombAddr)
- prm.SetAddresses(o)
-
- _, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
- }
+ _, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(objCount), c.Phy)
- require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic)
- require.Equal(t, uint64(objCount-len(inhumedObjs)), c.User)
-
- cc, err := db.ContainerCounters(context.Background())
- require.NoError(t, err)
-
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+ require.Equal(t, uint64(objCount), c.Phy())
+ require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic())
})
}
-func TestDoublePut(t *testing.T) {
- t.Parallel()
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
- obj := testutil.GenerateObject()
-
- exp := make(map[cid.ID]meta.ObjectCounters)
- cnrID, _ := obj.ContainerID()
- exp[cnrID] = meta.ObjectCounters{
- Logic: 1,
- Phy: 1,
- User: 1,
- }
-
- var prm meta.PutPrm
- prm.SetObject(obj)
- pr, err := db.Put(context.Background(), prm)
- require.NoError(t, err)
- require.True(t, pr.Inserted)
-
- c, err := db.ObjectCounters()
- require.NoError(t, err)
-
- require.Equal(t, uint64(1), c.Phy)
- require.Equal(t, uint64(1), c.Logic)
- require.Equal(t, uint64(1), c.User)
-
- cc, err := db.ContainerCounters(context.Background())
- require.NoError(t, err)
-
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
-
- pr, err = db.Put(context.Background(), prm)
- require.NoError(t, err)
- require.False(t, pr.Inserted)
-
- c, err = db.ObjectCounters()
- require.NoError(t, err)
-
- require.Equal(t, uint64(1), c.Phy)
- require.Equal(t, uint64(1), c.Logic)
- require.Equal(t, uint64(1), c.User)
-
- cc, err = db.ContainerCounters(context.Background())
- require.NoError(t, err)
-
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
-}
-
func TestCounters_Expired(t *testing.T) {
// That test is about expired objects without
// GCMark yet. Such objects should be treated as
@@ -387,34 +184,18 @@ func TestCounters_Expired(t *testing.T) {
es := &epochState{epoch}
db := newDB(t, meta.WithEpochState(es))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
oo := make([]oid.Address, objCount)
for i := range oo {
oo[i] = putWithExpiration(t, db, objectSDK.TypeRegular, epoch+1)
}
- exp := make(map[cid.ID]meta.ObjectCounters)
- for _, addr := range oo {
- exp[addr.Container()] = meta.ObjectCounters{
- Logic: 1,
- Phy: 1,
- User: 1,
- }
- }
-
// 1. objects are available and counters are correct
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(objCount), c.Phy)
- require.Equal(t, uint64(objCount), c.Logic)
- require.Equal(t, uint64(objCount), c.User)
-
- cc, err := db.ContainerCounters(context.Background())
- require.NoError(t, err)
-
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+ require.Equal(t, uint64(objCount), c.Phy())
+ require.Equal(t, uint64(objCount), c.Logic())
for _, o := range oo {
_, err := metaGet(db, o, true)
@@ -428,14 +209,8 @@ func TestCounters_Expired(t *testing.T) {
c, err = db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(objCount), c.Phy)
- require.Equal(t, uint64(objCount), c.Logic)
- require.Equal(t, uint64(objCount), c.User)
-
- cc, err = db.ContainerCounters(context.Background())
- require.NoError(t, err)
-
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+ require.Equal(t, uint64(objCount), c.Phy())
+ require.Equal(t, uint64(objCount), c.Logic())
for _, o := range oo {
_, err := metaGet(db, o, true)
@@ -452,30 +227,13 @@ func TestCounters_Expired(t *testing.T) {
inhumeRes, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- require.Equal(t, uint64(1), inhumeRes.LogicInhumed())
- require.Equal(t, uint64(1), inhumeRes.UserInhumed())
+ require.Equal(t, uint64(1), inhumeRes.AvailableInhumed())
c, err = db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(len(oo)), c.Phy)
- require.Equal(t, uint64(len(oo)-1), c.Logic)
- require.Equal(t, uint64(len(oo)-1), c.User)
-
- if v, ok := exp[oo[0].Container()]; ok {
- v.Logic--
- v.User--
- if v.IsZero() {
- delete(exp, oo[0].Container())
- } else {
- exp[oo[0].Container()] = v
- }
- }
-
- cc, err = db.ContainerCounters(context.Background())
- require.NoError(t, err)
-
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+ require.Equal(t, uint64(len(oo)), c.Phy())
+ require.Equal(t, uint64(len(oo)-1), c.Logic())
// 4. `Delete` an object with GCMark should decrease the
// phy counter but does not affect the logic counter (after
@@ -486,26 +244,14 @@ func TestCounters_Expired(t *testing.T) {
deleteRes, err := db.Delete(context.Background(), deletePrm)
require.NoError(t, err)
- require.Zero(t, deleteRes.LogicCount())
- require.Zero(t, deleteRes.UserCount())
-
- if v, ok := exp[oo[0].Container()]; ok {
- v.Phy--
- exp[oo[0].Container()] = v
- }
+ require.Zero(t, deleteRes.AvailableObjectsRemoved())
oo = oo[1:]
c, err = db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(len(oo)), c.Phy)
- require.Equal(t, uint64(len(oo)), c.Logic)
- require.Equal(t, uint64(len(oo)), c.User)
-
- cc, err = db.ContainerCounters(context.Background())
- require.NoError(t, err)
-
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+ require.Equal(t, uint64(len(oo)), c.Phy())
+ require.Equal(t, uint64(len(oo)), c.Logic())
// 5 `Delete` an expired object (like it would the control
// service do) should decrease both counters despite the
@@ -515,28 +261,14 @@ func TestCounters_Expired(t *testing.T) {
deleteRes, err = db.Delete(context.Background(), deletePrm)
require.NoError(t, err)
- require.Equal(t, uint64(1), deleteRes.LogicCount())
- require.Equal(t, uint64(1), deleteRes.UserCount())
-
- if v, ok := exp[oo[0].Container()]; ok {
- v.Phy--
- v.Logic--
- v.User--
- exp[oo[0].Container()] = v
- }
+ require.Equal(t, uint64(1), deleteRes.AvailableObjectsRemoved())
oo = oo[1:]
c, err = db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(len(oo)), c.Phy)
- require.Equal(t, uint64(len(oo)), c.Logic)
- require.Equal(t, uint64(len(oo)), c.User)
-
- cc, err = db.ContainerCounters(context.Background())
- require.NoError(t, err)
-
- require.Equal(t, meta.ContainerCounters{Counts: exp}, cc)
+ require.Equal(t, uint64(len(oo)), c.Phy())
+ require.Equal(t, uint64(len(oo)), c.Logic())
}
func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK.Object {
@@ -545,11 +277,10 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK
parent := testutil.GenerateObject()
oo := make([]*objectSDK.Object, 0, count)
- for i := range count {
+ for i := 0; i < count; i++ {
o := testutil.GenerateObject()
if withParent {
o.SetParent(parent)
- o.SetSplitID(objectSDK.NewSplitID())
}
oo = append(oo, o)
@@ -561,8 +292,8 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK
c, err := db.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, uint64(i+1), c.Phy)
- require.Equal(t, uint64(i+1), c.Logic)
+ require.Equal(t, uint64(i+1), c.Phy())
+ require.Equal(t, uint64(i+1), c.Logic())
}
return oo
diff --git a/pkg/local_object_storage/metabase/db.go b/pkg/local_object_storage/metabase/db.go
index 4474aa229..f5341ff2e 100644
--- a/pkg/local_object_storage/metabase/db.go
+++ b/pkg/local_object_storage/metabase/db.go
@@ -11,9 +11,9 @@ import (
"sync"
"time"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/mr-tron/base58"
"go.etcd.io/bbolt"
@@ -70,7 +70,7 @@ func defaultCfg() *cfg {
},
boltBatchDelay: bbolt.DefaultMaxBatchDelay,
boltBatchSize: bbolt.DefaultMaxBatchSize,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
metrics: &noopMetrics{},
}
}
@@ -107,7 +107,6 @@ func New(opts ...Option) *DB {
matchBucket: stringCommonPrefixMatcherBucket,
},
},
- mode: mode.Disabled,
}
}
@@ -263,7 +262,7 @@ func unknownMatcherBucket(_ *bbolt.Bucket, _ string, _ string, _ func([]byte, []
// in boltDB. Useful for getting filter values from unique and list indexes.
func bucketKeyHelper(hdr string, val string) []byte {
switch hdr {
- case v2object.FilterHeaderParent, v2object.FilterHeaderECParent:
+ case v2object.FilterHeaderParent:
v, err := base58.Decode(val)
if err != nil {
return nil
diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go
index edaeb13c5..bc5015b60 100644
--- a/pkg/local_object_storage/metabase/db_test.go
+++ b/pkg/local_object_storage/metabase/db_test.go
@@ -2,14 +2,14 @@ package meta_test
import (
"context"
+ "os"
"path/filepath"
"strconv"
"testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -32,17 +32,7 @@ func putBig(db *meta.DB, obj *objectSDK.Object) error {
}
func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, exp ...oid.Address) {
- res, err := metaSelect(db, cnr, fs, false)
- require.NoError(t, err)
- require.Len(t, res, len(exp))
-
- for i := range exp {
- require.Contains(t, res, exp[i])
- }
-}
-
-func testSelect2(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, useAttrIndex bool, exp ...oid.Address) {
- res, err := metaSelect(db, cnr, fs, useAttrIndex)
+ res, err := metaSelect(db, cnr, fs)
require.NoError(t, err)
require.Len(t, res, len(exp))
@@ -55,13 +45,18 @@ func newDB(t testing.TB, opts ...meta.Option) *meta.DB {
bdb := meta.New(
append([]meta.Option{
meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
- meta.WithPermissions(0o600),
+ meta.WithPermissions(0600),
meta.WithEpochState(epochState{}),
}, opts...)...,
)
- require.NoError(t, bdb.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bdb.Init(context.Background()))
+ require.NoError(t, bdb.Open(context.Background(), false))
+ require.NoError(t, bdb.Init())
+
+ t.Cleanup(func() {
+ bdb.Close()
+ os.Remove(bdb.DumpInfo().Path)
+ })
return bdb
}
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 9a5a6e574..f7ff7a129 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -12,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@@ -20,8 +19,6 @@ import (
"go.opentelemetry.io/otel/trace"
)
-var errFailedToRemoveUniqueIndexes = errors.New("can't remove unique indexes")
-
// DeletePrm groups the parameters of Delete operation.
type DeletePrm struct {
addrs []oid.Address
@@ -29,42 +26,31 @@ type DeletePrm struct {
// DeleteRes groups the resulting values of Delete operation.
type DeleteRes struct {
- phyCount uint64
- logicCount uint64
- userCount uint64
- phySize uint64
- logicSize uint64
- removedByCnrID map[cid.ID]ObjectCounters
+ rawRemoved uint64
+ availableRemoved uint64
+ sizes []uint64
+ availableSizes []uint64
}
-// LogicCount returns the number of removed logic
+// AvailableObjectsRemoved returns the number of removed available
// objects.
-func (d DeleteRes) LogicCount() uint64 {
- return d.logicCount
+func (d DeleteRes) AvailableObjectsRemoved() uint64 {
+ return d.availableRemoved
}
-func (d DeleteRes) UserCount() uint64 {
- return d.userCount
+// RawObjectsRemoved returns the number of removed raw objects.
+func (d DeleteRes) RawObjectsRemoved() uint64 {
+ return d.rawRemoved
}
-// RemovedByCnrID returns the number of removed objects by container ID.
-func (d DeleteRes) RemovedByCnrID() map[cid.ID]ObjectCounters {
- return d.removedByCnrID
+// RemovedPhysicalObjectSizes returns the sizes of removed physical objects.
+func (d DeleteRes) RemovedPhysicalObjectSizes() []uint64 {
+ return d.sizes
}
-// PhyCount returns the number of removed physical objects.
-func (d DeleteRes) PhyCount() uint64 {
- return d.phyCount
-}
-
-// PhySize returns the size of removed physical objects.
-func (d DeleteRes) PhySize() uint64 {
- return d.phySize
-}
-
-// LogicSize returns the size of removed logical objects.
-func (d DeleteRes) LogicSize() uint64 {
- return d.logicSize
+// RemovedLogicalObjectSizes returns the sizes of removed logical objects.
+func (d DeleteRes) RemovedLogicalObjectSizes() []uint64 {
+ return d.availableSizes
}
// SetAddresses is a Delete option to set the addresses of the objects to delete.
@@ -77,6 +63,8 @@ func (p *DeletePrm) SetAddresses(addrs ...oid.Address) {
type referenceNumber struct {
all, cur int
+ addr oid.Address
+
obj *objectSDK.Object
}
@@ -107,134 +95,87 @@ func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
return DeleteRes{}, ErrReadOnlyMode
}
+ var rawRemoved uint64
+ var availableRemoved uint64
var err error
- var res DeleteRes
+ var sizes = make([]uint64, len(prm.addrs))
+ var availableSizes = make([]uint64, len(prm.addrs))
- err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
- res, err = db.deleteGroup(tx, prm.addrs)
+ err = db.boltDB.Update(func(tx *bbolt.Tx) error {
+ // We need to clear slice because tx can try to execute multiple times.
+ rawRemoved, availableRemoved, err = db.deleteGroup(tx, prm.addrs, sizes, availableSizes)
return err
})
if err == nil {
deleted = true
for i := range prm.addrs {
- storagelog.Write(ctx, db.log,
+ storagelog.Write(db.log,
storagelog.AddressField(prm.addrs[i]),
storagelog.OpField("metabase DELETE"))
}
}
- return res, metaerr.Wrap(err)
+ return DeleteRes{
+ rawRemoved: rawRemoved,
+ availableRemoved: availableRemoved,
+ sizes: sizes,
+ availableSizes: availableSizes,
+ }, metaerr.Wrap(err)
}
// deleteGroup deletes object from the metabase. Handles removal of the
// references of the split objects.
-func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address) (DeleteRes, error) {
- res := DeleteRes{
- removedByCnrID: make(map[cid.ID]ObjectCounters),
- }
+// The first return value is a physical objects removed number: physical
+// objects that were stored. The second return value is a logical objects
+// removed number: objects that were available (without Tombstones, GCMarks
+// non-expired, etc.)
+func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address, sizes []uint64, availableSizes []uint64) (uint64, uint64, error) {
refCounter := make(referenceCounter, len(addrs))
currEpoch := db.epochState.CurrentEpoch()
+ var rawDeleted uint64
+ var availableDeleted uint64
+
for i := range addrs {
- r, err := db.delete(tx, addrs[i], refCounter, currEpoch)
+ removed, available, size, err := db.delete(tx, addrs[i], refCounter, currEpoch)
if err != nil {
- return DeleteRes{}, err
+ return 0, 0, err // maybe log and continue?
}
- applyDeleteSingleResult(r, &res, addrs, i)
+ if removed {
+ rawDeleted++
+ sizes[i] = size
+ }
+
+ if available {
+ availableDeleted++
+ availableSizes[i] = size
+ }
}
- if err := db.updateCountersDelete(tx, res); err != nil {
- return DeleteRes{}, err
+ if rawDeleted > 0 {
+ err := db.updateCounter(tx, phy, rawDeleted, false)
+ if err != nil {
+ return 0, 0, fmt.Errorf("could not decrease phy object counter: %w", err)
+ }
+ }
+
+ if availableDeleted > 0 {
+ err := db.updateCounter(tx, logical, availableDeleted, false)
+ if err != nil {
+ return 0, 0, fmt.Errorf("could not decrease logical object counter: %w", err)
+ }
}
for _, refNum := range refCounter {
if refNum.cur == refNum.all {
err := db.deleteObject(tx, refNum.obj, true)
if err != nil {
- return DeleteRes{}, err
+ return rawDeleted, availableDeleted, err // maybe log and continue?
}
}
}
- return res, nil
-}
-
-func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error {
- if res.phyCount > 0 {
- err := db.decShardObjectCounter(tx, phy, res.phyCount)
- if err != nil {
- return fmt.Errorf("decrease phy object counter: %w", err)
- }
- }
-
- if res.logicCount > 0 {
- err := db.decShardObjectCounter(tx, logical, res.logicCount)
- if err != nil {
- return fmt.Errorf("decrease logical object counter: %w", err)
- }
- }
-
- if res.userCount > 0 {
- err := db.decShardObjectCounter(tx, user, res.userCount)
- if err != nil {
- return fmt.Errorf("decrease user object counter: %w", err)
- }
- }
-
- if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil {
- return fmt.Errorf("decrease container object counter: %w", err)
- }
- return nil
-}
-
-func applyDeleteSingleResult(r deleteSingleResult, res *DeleteRes, addrs []oid.Address, i int) {
- if r.Phy {
- if v, ok := res.removedByCnrID[addrs[i].Container()]; ok {
- v.Phy++
- res.removedByCnrID[addrs[i].Container()] = v
- } else {
- res.removedByCnrID[addrs[i].Container()] = ObjectCounters{
- Phy: 1,
- }
- }
-
- res.phyCount++
- res.phySize += r.Size
- }
-
- if r.Logic {
- if v, ok := res.removedByCnrID[addrs[i].Container()]; ok {
- v.Logic++
- res.removedByCnrID[addrs[i].Container()] = v
- } else {
- res.removedByCnrID[addrs[i].Container()] = ObjectCounters{
- Logic: 1,
- }
- }
-
- res.logicCount++
- res.logicSize += r.Size
- }
-
- if r.User {
- if v, ok := res.removedByCnrID[addrs[i].Container()]; ok {
- v.User++
- res.removedByCnrID[addrs[i].Container()] = v
- } else {
- res.removedByCnrID[addrs[i].Container()] = ObjectCounters{
- User: 1,
- }
- }
-
- res.userCount++
- }
-}
-
-type deleteSingleResult struct {
- Phy bool
- Logic bool
- User bool
- Size uint64
+ return rawDeleted, availableDeleted, nil
}
// delete removes object indexes from the metabase. Counts the references
@@ -242,8 +183,8 @@ type deleteSingleResult struct {
// The first return value indicates if an object has been removed. (removing a
// non-exist object is error-free). The second return value indicates if an
// object was available before the removal (for calculating the logical object
-// counter). The third return value The fourth return value is removed object payload size.
-func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter, currEpoch uint64) (deleteSingleResult, error) {
+// counter). The third return value is removed object payload size.
+func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter, currEpoch uint64) (bool, bool, uint64, error) {
key := make([]byte, addressKeySize)
addrKey := addressKey(addr, key)
garbageBKT := tx.Bucket(garbageBucketName)
@@ -251,39 +192,26 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
removeAvailableObject := inGraveyardWithKey(addrKey, graveyardBKT, garbageBKT) == 0
- // unmarshal object, work only with physically stored (raw == true) objects
- obj, err := db.get(tx, addr, key, false, true, currEpoch)
- if err != nil {
- if client.IsErrObjectNotFound(err) {
- addrKey = addressKey(addr, key)
- if garbageBKT != nil {
- err := garbageBKT.Delete(addrKey)
- if err != nil {
- return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
- }
- }
- return deleteSingleResult{}, nil
- }
- var siErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
- if errors.As(err, &siErr) || errors.As(err, &ecErr) {
- // if object is virtual (parent) then do nothing, it will be deleted with last child
- // if object is erasure-coded it will be deleted with the last chunk presented on the shard
- return deleteSingleResult{}, nil
- }
-
- return deleteSingleResult{}, err
- }
-
- addrKey = addressKey(addr, key)
// remove record from the garbage bucket
if garbageBKT != nil {
err := garbageBKT.Delete(addrKey)
if err != nil {
- return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
+ return false, false, 0, fmt.Errorf("could not remove from garbage bucket: %w", err)
}
}
+ // unmarshal object, work only with physically stored (raw == true) objects
+ obj, err := db.get(tx, addr, key, false, true, currEpoch)
+ if err != nil {
+ var siErr *objectSDK.SplitInfoError
+
+ if client.IsErrObjectNotFound(err) || errors.As(err, &siErr) {
+ return false, false, 0, nil
+ }
+
+ return false, false, 0, err
+ }
+
// if object is an only link to a parent, then remove parent
if parent := obj.Parent(); parent != nil {
parAddr := object.AddressOf(parent)
@@ -293,8 +221,9 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
nRef, ok := refCounter[k]
if !ok {
nRef = &referenceNumber{
- all: parentLength(tx, parAddr),
- obj: parent,
+ all: parentLength(tx, parAddr),
+ addr: parAddr,
+ obj: parent,
}
refCounter[k] = nRef
@@ -303,24 +232,13 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
nRef.cur++
}
- isUserObject := IsUserObject(obj)
-
// remove object
err = db.deleteObject(tx, obj, false)
if err != nil {
- return deleteSingleResult{}, fmt.Errorf("remove object: %w", err)
+ return false, false, 0, fmt.Errorf("could not remove object: %w", err)
}
- if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil {
- return deleteSingleResult{}, err
- }
-
- return deleteSingleResult{
- Phy: true,
- Logic: removeAvailableObject,
- User: isUserObject && removeAvailableObject,
- Size: obj.PayloadSize(),
- }, nil
+ return true, removeAvailableObject, obj.PayloadSize(), nil
}
func (db *DB) deleteObject(
@@ -330,30 +248,17 @@ func (db *DB) deleteObject(
) error {
err := delUniqueIndexes(tx, obj, isParent)
if err != nil {
- return errFailedToRemoveUniqueIndexes
+ return fmt.Errorf("can't remove unique indexes")
}
err = updateListIndexes(tx, obj, delListIndexItem)
if err != nil {
- return fmt.Errorf("remove list indexes: %w", err)
+ return fmt.Errorf("can't remove list indexes: %w", err)
}
err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
if err != nil {
- return fmt.Errorf("remove fake bucket tree indexes: %w", err)
- }
-
- if isParent {
- // remove record from the garbage bucket, because regular object deletion does nothing for virtual object
- garbageBKT := tx.Bucket(garbageBucketName)
- if garbageBKT != nil {
- key := make([]byte, addressKeySize)
- addrKey := addressKey(object.AddressOf(obj), key)
- err := garbageBKT.Delete(addrKey)
- if err != nil {
- return fmt.Errorf("remove from garbage bucket: %w", err)
- }
- }
+ return fmt.Errorf("can't remove fake bucket tree indexes: %w", err)
}
return nil
@@ -363,12 +268,12 @@ func (db *DB) deleteObject(
func parentLength(tx *bbolt.Tx, addr oid.Address) int {
bucketName := make([]byte, bucketKeySize)
- bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName))
+ bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName[:]))
if bkt == nil {
return 0
}
- lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName)))
+ lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName[:])))
if err != nil {
return 0
}
@@ -376,11 +281,25 @@ func parentLength(tx *bbolt.Tx, addr oid.Address) int {
return len(lst)
}
-func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) {
bkt := tx.Bucket(item.name)
if bkt != nil {
- return bkt.Delete(item.key)
+ _ = bkt.Delete(item.key) // ignore error, best effort there
}
+}
+
+func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+ bkt := tx.Bucket(item.name)
+ if bkt == nil {
+ return nil
+ }
+
+ fkbtRoot := bkt.Bucket(item.key)
+ if fkbtRoot == nil {
+ return nil
+ }
+
+ _ = fkbtRoot.Delete(item.val) // ignore error, best effort there
return nil
}
@@ -406,62 +325,26 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
// if list empty, remove the key from bucket
if len(lst) == 0 {
- return bkt.Delete(item.key)
+ _ = bkt.Delete(item.key) // ignore error, best effort there
+
+ return nil
}
// if list is not empty, then update it
encodedLst, err := encodeList(lst)
if err != nil {
- return err
+ return nil // ignore error, best effort there
}
- return bkt.Put(item.key, encodedLst)
-}
-
-func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt := tx.Bucket(item.name)
- if bkt == nil {
- return nil
- }
-
- fkbtRoot := bkt.Bucket(item.key)
- if fkbtRoot == nil {
- return nil
- }
-
- if err := fkbtRoot.Delete(item.val); err != nil {
- return err
- }
-
- if hasAnyItem(fkbtRoot) {
- return nil
- }
-
- if err := bkt.DeleteBucket(item.key); err != nil {
- return err
- }
-
- if hasAnyItem(bkt) {
- return nil
- }
-
- return tx.DeleteBucket(item.name)
-}
-
-func hasAnyItem(b *bbolt.Bucket) bool {
- var hasAnyItem bool
- c := b.Cursor()
- for k, _ := c.First(); k != nil; {
- hasAnyItem = true
- break
- }
- return hasAnyItem
+ _ = bkt.Put(item.key, encodedLst) // ignore error, best effort there
+ return nil
}
func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error {
addr := object.AddressOf(obj)
objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
+ addrKey := addressKey(addr, make([]byte, addressKeySize))
cnr := addr.Container()
bucketName := make([]byte, bucketKeySize)
@@ -478,120 +361,29 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error
return ErrUnknownObjectType
}
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
key: objKey,
- }); err != nil {
- return err
- }
+ })
} else {
- if err := delUniqueIndexItem(tx, namedBucketItem{
+ delUniqueIndexItem(tx, namedBucketItem{
name: parentBucketName(cnr, bucketName),
key: objKey,
- }); err != nil {
- return err
- }
+ })
}
- if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
+ delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
name: smallBucketName(cnr, bucketName),
key: objKey,
- }); err != nil {
- return err
- }
- if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
+ })
+ delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
name: rootBucketName(cnr, bucketName),
key: objKey,
- }); err != nil {
- return err
- }
-
- if expEpoch, ok := hasExpirationEpoch(obj); ok {
- if err := delUniqueIndexItem(tx, namedBucketItem{
- name: expEpochToObjectBucketName,
- key: expirationEpochKey(expEpoch, cnr, addr.Object()),
- }); err != nil {
- return err
- }
- if err := delUniqueIndexItem(tx, namedBucketItem{
- name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)),
- key: objKey,
- }); err != nil {
- return err
- }
- }
+ })
+ delUniqueIndexItem(tx, namedBucketItem{ // remove from ToMoveIt index
+ name: toMoveItBucketName,
+ key: addrKey,
+ })
return nil
}
-
-func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.Object, cnr cid.ID, refCounter referenceCounter) error {
- ech := obj.ECHeader()
- if ech == nil {
- return nil
- }
-
- hasAnyChunks := hasAnyECChunks(tx, ech, cnr)
- // drop EC parent GC mark if current EC chunk is the last one
- if !hasAnyChunks && garbageBKT != nil {
- var ecParentAddress oid.Address
- ecParentAddress.SetContainer(cnr)
- ecParentAddress.SetObject(ech.Parent())
- addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize))
- err := garbageBKT.Delete(addrKey)
- if err != nil {
- return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
- }
- }
-
- // also drop EC parent root info if current EC chunk is the last one
- if !hasAnyChunks {
- if err := delUniqueIndexItem(tx, namedBucketItem{
- name: rootBucketName(cnr, make([]byte, bucketKeySize)),
- key: objectKey(ech.Parent(), make([]byte, objectKeySize)),
- }); err != nil {
- return err
- }
- }
-
- if ech.ParentSplitParentID() == nil {
- return nil
- }
-
- var splitParentAddress oid.Address
- splitParentAddress.SetContainer(cnr)
- splitParentAddress.SetObject(*ech.ParentSplitParentID())
-
- if ref, ok := refCounter[string(addressKey(splitParentAddress, make([]byte, addressKeySize)))]; ok {
- // linking object is already processing
- // so just inform that one more reference was deleted
- // split info and gc marks will be deleted after linking object delete
- ref.cur++
- return nil
- }
-
- if parentLength(tx, splitParentAddress) > 0 {
- // linking object still exists, so leave split info and gc mark deletion for linking object processing
- return nil
- }
-
- // drop split parent gc mark
- if garbageBKT != nil {
- addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize))
- err := garbageBKT.Delete(addrKey)
- if err != nil {
- return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
- }
- }
-
- // drop split info
- return delUniqueIndexItem(tx, namedBucketItem{
- name: rootBucketName(cnr, make([]byte, bucketKeySize)),
- key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)),
- })
-}
-
-func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool {
- data := getFromBucket(tx, ecInfoBucketName(cnr, make([]byte, bucketKeySize)),
- objectKey(ech.Parent(), make([]byte, objectKeySize)))
- return len(data) > 0
-}
diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go
deleted file mode 100644
index 884da23ff..000000000
--- a/pkg/local_object_storage/metabase/delete_ec_test.go
+++ /dev/null
@@ -1,443 +0,0 @@
-package meta
-
-import (
- "bytes"
- "context"
- "fmt"
- "path/filepath"
- "slices"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/stretchr/testify/require"
- "go.etcd.io/bbolt"
-)
-
-func TestDeleteECObject_WithoutSplit(t *testing.T) {
- t.Parallel()
-
- db := New(
- WithPath(filepath.Join(t.TempDir(), "metabase")),
- WithPermissions(0o600),
- WithEpochState(epochState{uint64(12)}),
- )
-
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
- ecChunk := oidtest.ID()
- ecParent := oidtest.ID()
- tombstoneID := oidtest.ID()
-
- chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetID(ecChunk)
- chunkObj.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
- chunkObj.SetPayloadSize(uint64(10))
- chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0))
-
- // put object with EC
-
- var prm PutPrm
- prm.SetObject(chunkObj)
- prm.SetStorageID([]byte("0/0"))
- _, err := db.Put(context.Background(), prm)
- require.NoError(t, err)
-
- var ecChunkAddress oid.Address
- ecChunkAddress.SetContainer(cnr)
- ecChunkAddress.SetObject(ecChunk)
-
- var ecParentAddress oid.Address
- ecParentAddress.SetContainer(cnr)
- ecParentAddress.SetObject(ecParent)
-
- var getPrm GetPrm
-
- getPrm.SetAddress(ecChunkAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.NoError(t, err)
-
- var ecInfoError *objectSDK.ECInfoError
- getPrm.SetAddress(ecParentAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, &ecInfoError)
- require.True(t, len(ecInfoError.ECInfo().Chunks) == 1 &&
- ecInfoError.ECInfo().Chunks[0].Index == 0 &&
- ecInfoError.ECInfo().Chunks[0].Total == 3)
-
- // inhume EC parent (like Delete does)
-
- var inhumePrm InhumePrm
- var tombAddress oid.Address
- tombAddress.SetContainer(cnr)
- tombAddress.SetObject(tombstoneID)
- inhumePrm.SetAddresses(ecParentAddress)
- inhumePrm.SetTombstoneAddress(tombAddress)
- _, err = db.Inhume(context.Background(), inhumePrm)
- require.NoError(t, err)
-
- getPrm.SetAddress(ecParentAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
-
- getPrm.SetAddress(ecChunkAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
-
- // GC finds and deletes split, EC parent and EC chunk
-
- var garbageAddresses []oid.Address
- var itPrm GarbageIterationPrm
- itPrm.SetHandler(func(g GarbageObject) error {
- garbageAddresses = append(garbageAddresses, g.Address())
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
- require.Equal(t, 2, len(garbageAddresses))
- require.True(t, slices.Contains(garbageAddresses, ecParentAddress))
- require.True(t, slices.Contains(garbageAddresses, ecChunkAddress))
-
- var deletePrm DeletePrm
- deletePrm.SetAddresses(garbageAddresses...)
- _, err = db.Delete(context.Background(), deletePrm)
- require.NoError(t, err)
-
- garbageAddresses = nil
- itPrm.SetHandler(func(g GarbageObject) error {
- garbageAddresses = append(garbageAddresses, g.Address())
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
- require.Equal(t, 0, len(garbageAddresses))
-
- // after tombstone expired GC inhumes tombstone and drops graves
-
- var tombstonedObjects []TombstonedObject
- var graveyardIterationPrm GraveyardIterationPrm
- graveyardIterationPrm.SetHandler(func(object TombstonedObject) error {
- tombstonedObjects = append(tombstonedObjects, object)
- return nil
- })
- require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
- require.Equal(t, 2, len(tombstonedObjects))
-
- _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
- require.NoError(t, err)
-
- // GC finds tombstone as garbage and deletes it
-
- garbageAddresses = nil
- itPrm.SetHandler(func(g GarbageObject) error {
- garbageAddresses = append(garbageAddresses, g.Address())
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
- require.Equal(t, 1, len(garbageAddresses))
- require.Equal(t, tombstoneID, garbageAddresses[0].Object())
-
- deletePrm.SetAddresses(garbageAddresses...)
- _, err = db.Delete(context.Background(), deletePrm)
- require.NoError(t, err)
-
- // no more objects should left as garbage
-
- itPrm.SetHandler(func(g GarbageObject) error {
- require.FailNow(t, "no garbage objects should left")
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
-
- require.NoError(t, db.boltDB.View(testVerifyNoObjectDataLeft))
-
- require.NoError(t, testCountersAreZero(db, cnr))
-}
-
-func TestDeleteECObject_WithSplit(t *testing.T) {
- t.Parallel()
- for _, c := range []int{1, 2, 3} {
- for _, l := range []bool{true, false} {
- test := fmt.Sprintf("%d EC chunks with split info without linking object", c)
- if l {
- test = fmt.Sprintf("%d EC chunks with split info with linking object", c)
- }
- t.Run(test, func(t *testing.T) {
- testDeleteECObjectWithSplit(t, c, l)
- })
- }
- }
-}
-
-func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool) {
- t.Parallel()
-
- db := New(
- WithPath(filepath.Join(t.TempDir(), "metabase")),
- WithPermissions(0o600),
- WithEpochState(epochState{uint64(12)}),
- )
-
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
- ecChunks := make([]oid.ID, chunksCount)
- for idx := range ecChunks {
- ecChunks[idx] = oidtest.ID()
- }
- ecParentID := oidtest.ID()
- splitParentID := oidtest.ID()
- tombstoneID := oidtest.ID()
- splitID := objectSDK.NewSplitID()
- linkingID := oidtest.ID()
-
- ecChunkObjects := make([]*objectSDK.Object, chunksCount)
- for idx := range ecChunkObjects {
- ecChunkObjects[idx] = testutil.GenerateObjectWithCID(cnr)
- ecChunkObjects[idx].SetContainerID(cnr)
- ecChunkObjects[idx].SetID(ecChunks[idx])
- ecChunkObjects[idx].SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
- ecChunkObjects[idx].SetPayloadSize(uint64(10))
- ecChunkObjects[idx].SetECHeader(objectSDK.NewECHeader(
- objectSDK.ECParentInfo{
- ID: ecParentID,
- SplitParentID: &splitParentID, SplitID: splitID,
- }, uint32(idx), uint32(chunksCount+1), []byte{}, 0))
- }
-
- splitParentObj := testutil.GenerateObjectWithCID(cnr)
- splitParentObj.SetID(splitParentID)
-
- var linkingAddress oid.Address
- linkingAddress.SetContainer(cnr)
- linkingAddress.SetObject(linkingID)
-
- linkingObj := testutil.GenerateObjectWithCID(cnr)
- linkingObj.SetID(linkingID)
- linkingObj.SetParent(splitParentObj)
- linkingObj.SetParentID(splitParentID)
- linkingObj.SetChildren(ecParentID, oidtest.ID(), oidtest.ID())
- linkingObj.SetSplitID(splitID)
-
- // put object with EC and split info
-
- var prm PutPrm
- prm.SetStorageID([]byte("0/0"))
- for _, obj := range ecChunkObjects {
- prm.SetObject(obj)
- _, err := db.Put(context.Background(), prm)
- require.NoError(t, err)
- }
-
- if withLinking {
- prm.SetObject(linkingObj)
- _, err := db.Put(context.Background(), prm)
- require.NoError(t, err)
- }
-
- var ecParentAddress oid.Address
- ecParentAddress.SetContainer(cnr)
- ecParentAddress.SetObject(ecParentID)
-
- var getPrm GetPrm
- var ecInfoError *objectSDK.ECInfoError
- getPrm.SetAddress(ecParentAddress)
- _, err := db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, &ecInfoError)
- require.True(t, len(ecInfoError.ECInfo().Chunks) == chunksCount)
-
- var splitParentAddress oid.Address
- splitParentAddress.SetContainer(cnr)
- splitParentAddress.SetObject(splitParentID)
-
- var splitInfoError *objectSDK.SplitInfoError
- getPrm.SetAddress(splitParentAddress)
- getPrm.SetRaw(true)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, &splitInfoError)
- require.True(t, splitInfoError.SplitInfo() != nil)
- require.Equal(t, splitID, splitInfoError.SplitInfo().SplitID())
- lastPart, set := splitInfoError.SplitInfo().LastPart()
- require.True(t, set)
- require.Equal(t, lastPart, ecParentID)
- if withLinking {
- l, ok := splitInfoError.SplitInfo().Link()
- require.True(t, ok)
- require.Equal(t, linkingID, l)
- }
- getPrm.SetRaw(false)
-
- // inhume EC parent and split objects (like Delete does)
-
- inhumeAddresses := []oid.Address{splitParentAddress, ecParentAddress}
- if withLinking {
- inhumeAddresses = append(inhumeAddresses, linkingAddress)
- }
-
- var inhumePrm InhumePrm
- var tombAddress oid.Address
- tombAddress.SetContainer(cnr)
- tombAddress.SetObject(tombstoneID)
- inhumePrm.SetAddresses(inhumeAddresses...)
- inhumePrm.SetTombstoneAddress(tombAddress)
- _, err = db.Inhume(context.Background(), inhumePrm)
- require.NoError(t, err)
-
- getPrm.SetAddress(ecParentAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
-
- getPrm.SetAddress(splitParentAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
-
- if withLinking {
- getPrm.SetAddress(linkingAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
- }
-
- for _, id := range ecChunks {
- var ecChunkAddress oid.Address
- ecChunkAddress.SetContainer(cnr)
- ecChunkAddress.SetObject(id)
- getPrm.SetAddress(ecChunkAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
- }
-
- // GC finds and deletes split, EC parent and EC chunks
-
- parentCount := 2 // split + ec
- if withLinking {
- parentCount = 3
- }
-
- var garbageAddresses []oid.Address
- var itPrm GarbageIterationPrm
- itPrm.SetHandler(func(g GarbageObject) error {
- garbageAddresses = append(garbageAddresses, g.Address())
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
- require.Equal(t, parentCount+chunksCount, len(garbageAddresses))
- require.True(t, slices.Contains(garbageAddresses, splitParentAddress))
- require.True(t, slices.Contains(garbageAddresses, ecParentAddress))
- if withLinking {
- require.True(t, slices.Contains(garbageAddresses, linkingAddress))
- }
- for _, id := range ecChunks {
- var ecChunkAddress oid.Address
- ecChunkAddress.SetContainer(cnr)
- ecChunkAddress.SetObject(id)
- require.True(t, slices.Contains(garbageAddresses, ecChunkAddress))
- }
-
- var deletePrm DeletePrm
- deletePrm.SetAddresses(garbageAddresses...)
- _, err = db.Delete(context.Background(), deletePrm)
- require.NoError(t, err)
-
- var garbageStub []oid.Address
- itPrm.SetHandler(func(g GarbageObject) error {
- garbageStub = append(garbageStub, g.Address())
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
- require.Equal(t, 0, len(garbageStub))
-
- // after tombstone expired GC inhumes tombstone and drops graves
-
- var tombstonedObjects []TombstonedObject
- var graveyardIterationPrm GraveyardIterationPrm
- graveyardIterationPrm.SetHandler(func(object TombstonedObject) error {
- tombstonedObjects = append(tombstonedObjects, object)
- return nil
- })
- require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm))
- require.True(t, len(tombstonedObjects) == parentCount+chunksCount)
-
- _, err = db.InhumeTombstones(context.Background(), tombstonedObjects)
- require.NoError(t, err)
-
- // GC finds tombstone as garbage and deletes it
-
- garbageAddresses = nil
- itPrm.SetHandler(func(g GarbageObject) error {
- garbageAddresses = append(garbageAddresses, g.Address())
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
- require.Equal(t, 1, len(garbageAddresses))
- require.Equal(t, tombstoneID, garbageAddresses[0].Object())
-
- deletePrm.SetAddresses(garbageAddresses...)
- _, err = db.Delete(context.Background(), deletePrm)
- require.NoError(t, err)
-
- // no more objects should left as garbage
-
- itPrm.SetHandler(func(g GarbageObject) error {
- require.FailNow(t, "no garbage objects should left")
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
-
- require.NoError(t, db.boltDB.View(testVerifyNoObjectDataLeft))
-
- require.NoError(t, testCountersAreZero(db, cnr))
-}
-
-func testVerifyNoObjectDataLeft(tx *bbolt.Tx) error {
- return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
- if bytes.Equal(name, shardInfoBucket) ||
- bytes.Equal(name, containerCounterBucketName) ||
- bytes.Equal(name, containerVolumeBucketName) ||
- bytes.Equal(name, expEpochToObjectBucketName) {
- return nil
- }
- return testBucketEmpty(name, b)
- })
-}
-
-func testBucketEmpty(name []byte, b *bbolt.Bucket) error {
- err := b.ForEach(func(k, v []byte) error {
- if len(v) > 0 {
- return fmt.Errorf("bucket %v is not empty", name)
- }
- return nil
- })
- if err != nil {
- return err
- }
- return b.ForEachBucket(func(k []byte) error {
- return testBucketEmpty(k, b.Bucket(k))
- })
-}
-
-func testCountersAreZero(db *DB, cnr cid.ID) error {
- c, err := db.ContainerCount(context.Background(), cnr)
- if err != nil {
- return err
- }
- if !c.IsZero() {
- return fmt.Errorf("container %s has non zero counters", cnr.EncodeToString())
- }
- s, err := db.ContainerSize(cnr)
- if err != nil {
- return err
- }
- if s != 0 {
- return fmt.Errorf("container %s has non zero size", cnr.EncodeToString())
- }
- return nil
-}
diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go
deleted file mode 100644
index 0329e3a73..000000000
--- a/pkg/local_object_storage/metabase/delete_meta_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package meta
-
-import (
- "bytes"
- "context"
- "path/filepath"
- "testing"
-
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "github.com/stretchr/testify/require"
- "go.etcd.io/bbolt"
-)
-
-func TestPutDeleteIndexAttributes(t *testing.T) {
- db := New([]Option{
- WithPath(filepath.Join(t.TempDir(), "metabase")),
- WithPermissions(0o600),
- WithEpochState(epochState{}),
- }...)
-
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
- obj1 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(obj1, "S3-Access-Box-CRDT-Name", "CRDT-Name")
- testutil.AddAttribute(obj1, objectSDK.AttributeFilePath, "/path/to/object")
-
- var putPrm PutPrm
- putPrm.SetObject(obj1)
-
- _, err := db.Put(context.Background(), putPrm)
- require.NoError(t, err)
-
- require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
- require.Nil(t, b)
- b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
- require.Nil(t, b)
- return nil
- }))
-
- obj2 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(obj2, "S3-Access-Box-CRDT-Name", "CRDT-Name")
- testutil.AddAttribute(obj2, objectSDK.AttributeFilePath, "/path/to/object")
-
- putPrm.SetObject(obj2)
- putPrm.SetIndexAttributes(true)
-
- _, err = db.Put(context.Background(), putPrm)
- require.NoError(t, err)
-
- objKey := objectKey(objectCore.AddressOf(obj2).Object(), make([]byte, objectKeySize))
- require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
- require.NotNil(t, b)
- b = b.Bucket([]byte("CRDT-Name"))
- require.NotNil(t, b)
- require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
- b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
- require.NotNil(t, b)
- b = b.Bucket([]byte("/path/to/object"))
- require.NotNil(t, b)
- require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
- return nil
- }))
-
- var dPrm DeletePrm
- dPrm.SetAddresses(objectCore.AddressOf(obj1), objectCore.AddressOf(obj2))
- _, err = db.Delete(context.Background(), dPrm)
- require.NoError(t, err)
-
- require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
- require.Nil(t, b)
- b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
- require.Nil(t, b)
- return nil
- }))
-}
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index c0762a377..9b68f0bf8 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -18,7 +18,6 @@ import (
func TestDB_Delete(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
parent := testutil.GenerateObjectWithCID(cnr)
@@ -33,6 +32,15 @@ func TestDB_Delete(t *testing.T) {
err := putBig(db, child)
require.NoError(t, err)
+ // fill ToMoveIt index
+ err = metaToMoveIt(db, object.AddressOf(child))
+ require.NoError(t, err)
+
+ // check if Movable list is not empty
+ l, err := metaMovable(db)
+ require.NoError(t, err)
+ require.Len(t, l, 1)
+
// try to remove parent, should be no-op, error-free
err = metaDelete(db, object.AddressOf(parent))
require.NoError(t, err)
@@ -40,18 +48,23 @@ func TestDB_Delete(t *testing.T) {
// inhume parent and child so they will be on graveyard
ts := testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts).Object())
+ err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts))
require.NoError(t, err)
ts = testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts).Object())
+ err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts))
require.NoError(t, err)
// delete object
err = metaDelete(db, object.AddressOf(child))
require.NoError(t, err)
+ // check if there is no data in Movable index
+ l, err = metaMovable(db)
+ require.NoError(t, err)
+ require.Len(t, l, 0)
+
// check if they marked as already removed
ok, err := metaExists(db, object.AddressOf(child))
@@ -65,7 +78,6 @@ func TestDB_Delete(t *testing.T) {
func TestDeleteAllChildren(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -103,12 +115,11 @@ func TestDeleteAllChildren(t *testing.T) {
func TestGraveOnlyDelete(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
addr := oidtest.Address()
// inhume non-existent object by address
- require.NoError(t, metaInhume(db, addr, oidtest.ID()))
+ require.NoError(t, metaInhume(db, addr, oidtest.Address()))
// delete the object data
require.NoError(t, metaDelete(db, addr))
@@ -116,7 +127,6 @@ func TestGraveOnlyDelete(t *testing.T) {
func TestExpiredObject(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
// removing expired object should be error-free
@@ -126,79 +136,6 @@ func TestExpiredObject(t *testing.T) {
})
}
-func TestDelete(t *testing.T) {
- db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
- for range 10 {
- obj := testutil.GenerateObjectWithCID(cnr)
-
- var prm meta.PutPrm
- prm.SetObject(obj)
- prm.SetStorageID([]byte("0/0"))
- _, err := db.Put(context.Background(), prm)
- require.NoError(t, err)
-
- var inhumePrm meta.InhumePrm
- inhumePrm.SetAddresses(object.AddressOf(obj))
- _, err = db.Inhume(context.Background(), inhumePrm)
- require.NoError(t, err)
- }
-
- var addrs []oid.Address
- var iprm meta.GarbageIterationPrm
- iprm.SetHandler(func(o meta.GarbageObject) error {
- addrs = append(addrs, o.Address())
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), iprm))
- require.Equal(t, 10, len(addrs))
- var deletePrm meta.DeletePrm
- deletePrm.SetAddresses(addrs...)
- _, err := db.Delete(context.Background(), deletePrm)
- require.NoError(t, err)
-
- addrs = nil
- iprm.SetHandler(func(o meta.GarbageObject) error {
- addrs = append(addrs, o.Address())
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), iprm))
- require.Equal(t, 0, len(addrs))
-}
-
-func TestDeleteDropsGCMarkIfObjectNotFound(t *testing.T) {
- db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- addr := oidtest.Address()
-
- var prm meta.InhumePrm
- prm.SetAddresses(addr)
- prm.SetGCMark()
- _, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
-
- var garbageCount int
- var itPrm meta.GarbageIterationPrm
- itPrm.SetHandler(func(g meta.GarbageObject) error {
- garbageCount++
- return nil
- })
- require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
- require.Equal(t, 1, garbageCount)
-
- var delPrm meta.DeletePrm
- delPrm.SetAddresses(addr)
- _, err = db.Delete(context.Background(), delPrm)
- require.NoError(t, err)
-
- garbageCount = 0
- require.NoError(t, db.IterateOverGarbage(context.Background(), itPrm))
- require.Equal(t, 0, garbageCount)
-}
-
func metaDelete(db *meta.DB, addrs ...oid.Address) error {
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(addrs...)
diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go
index 7bd6f90a6..aa9aba106 100644
--- a/pkg/local_object_storage/metabase/exists.go
+++ b/pkg/local_object_storage/metabase/exists.go
@@ -5,6 +5,7 @@ import (
"fmt"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -19,14 +20,12 @@ import (
// ExistsPrm groups the parameters of Exists operation.
type ExistsPrm struct {
- addr oid.Address
- ecParentAddr oid.Address
+ addr oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
type ExistsRes struct {
exists bool
- locked bool
}
var ErrLackSplitInfo = logicerr.New("no split info on parent object")
@@ -36,21 +35,11 @@ func (p *ExistsPrm) SetAddress(addr oid.Address) {
p.addr = addr
}
-// SetECParent is an Exists option to set objects parent.
-func (p *ExistsPrm) SetECParent(addr oid.Address) {
- p.ecParentAddr = addr
-}
-
// Exists returns the fact that the object is in the metabase.
func (p ExistsRes) Exists() bool {
return p.exists
}
-// Locked returns the fact that the object is locked.
-func (p ExistsRes) Locked() bool {
- return p.locked
-}
-
// Exists returns ErrAlreadyRemoved if addr was marked as removed. Otherwise it
// returns true if addr is in primary index or false if it is not.
//
@@ -81,7 +70,7 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err
currEpoch := db.epochState.CurrentEpoch()
err = db.boltDB.View(func(tx *bbolt.Tx) error {
- res.exists, res.locked, err = db.exists(tx, prm.addr, prm.ecParentAddr, currEpoch)
+ res.exists, err = db.exists(tx, prm.addr, currEpoch)
return err
})
@@ -89,34 +78,15 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err
return res, metaerr.Wrap(err)
}
-func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) {
- var locked bool
- if !ecParent.Equals(oid.Address{}) {
- st, err := objectStatus(tx, ecParent, currEpoch)
- if err != nil {
- return false, false, err
- }
- switch st {
- case 2:
- return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
- case 3:
- return false, locked, ErrObjectIsExpired
- }
-
- locked = objectLocked(tx, ecParent.Container(), ecParent.Object())
- }
+func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (exists bool, err error) {
// check graveyard and object expiration first
- st, err := objectStatus(tx, addr, currEpoch)
- if err != nil {
- return false, false, err
- }
- switch st {
+ switch objectStatus(tx, addr, currEpoch) {
case 1:
- return false, locked, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ return false, logicerr.Wrap(new(apistatus.ObjectNotFound))
case 2:
- return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
+ return false, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved))
case 3:
- return false, locked, ErrObjectIsExpired
+ return false, ErrObjectIsExpired
}
objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
@@ -126,25 +96,21 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currE
// if graveyard is empty, then check if object exists in primary bucket
if inBucket(tx, primaryBucketName(cnr, key), objKey) {
- return true, locked, nil
+ return true, nil
}
// if primary bucket is empty, then check if object exists in parent bucket
if inBucket(tx, parentBucketName(cnr, key), objKey) {
splitInfo, err := getSplitInfo(tx, cnr, objKey)
if err != nil {
- return false, locked, err
+ return false, err
}
- return false, locked, logicerr.Wrap(objectSDK.NewSplitInfoError(splitInfo))
- }
- // if parent bucket is empty, then check if object exists in ec bucket
- if data := getFromBucket(tx, ecInfoBucketName(cnr, key), objKey); len(data) != 0 {
- return false, locked, getECInfoError(tx, cnr, data)
+ return false, logicerr.Wrap(objectSDK.NewSplitInfoError(splitInfo))
}
// if parent bucket is empty, then check if object exists in typed buckets
- return firstIrregularObjectType(tx, cnr, objKey) != objectSDK.TypeRegular, locked, nil
+ return firstIrregularObjectType(tx, cnr, objKey) != objectSDK.TypeRegular, nil
}
// objectStatus returns:
@@ -152,29 +118,30 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currE
// - 1 if object with GC mark;
// - 2 if object is covered with tombstone;
// - 3 if object is expired.
-func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
- return objectStatusWithCache(nil, tx, addr, currEpoch)
-}
-
-func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
+func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) uint8 {
// locked object could not be removed/marked with GC/expired
- if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) {
- return 0, nil
+ if objectLocked(tx, addr.Container(), addr.Object()) {
+ return 0
}
- expired, err := isExpiredWithCache(bc, tx, addr, currEpoch)
- if err != nil {
- return 0, err
+ // we check only if the object is expired in the current
+ // epoch since it is considered the only corner case: the
+ // GC is expected to collect all the objects that have
+ // expired previously for less than the one epoch duration
+
+ expired := isExpiredWithAttribute(tx, objectV2.SysAttributeExpEpoch, addr, currEpoch)
+ if !expired {
+ expired = isExpiredWithAttribute(tx, objectV2.SysAttributeExpEpochNeoFS, addr, currEpoch)
}
if expired {
- return 3, nil
+ return 3
}
- graveyardBkt := getGraveyardBucket(bc, tx)
- garbageBkt := getGarbageBucket(bc, tx)
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
addrKey := addressKey(addr, make([]byte, addressKeySize))
- return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil
+ return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt)
}
func inGraveyardWithKey(addrKey []byte, graveyard, garbageBCK *bbolt.Bucket) uint8 {
@@ -232,7 +199,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e
err := splitInfo.Unmarshal(rawSplitInfo)
if err != nil {
- return nil, fmt.Errorf("unmarshal split info from root index: %w", err)
+ return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err)
}
return splitInfo, nil
diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go
index 3045e17f1..06394339a 100644
--- a/pkg/local_object_storage/metabase/exists_test.go
+++ b/pkg/local_object_storage/metabase/exists_test.go
@@ -1,7 +1,6 @@
package meta_test
import (
- "context"
"errors"
"testing"
@@ -19,7 +18,6 @@ const currEpoch = 1000
func TestDB_Exists(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
t.Run("no object", func(t *testing.T) {
nonExist := testutil.GenerateObject()
@@ -38,7 +36,7 @@ func TestDB_Exists(t *testing.T) {
require.True(t, exists)
t.Run("removed object", func(t *testing.T) {
- err := metaInhume(db, object.AddressOf(regular), oidtest.ID())
+ err := metaInhume(db, object.AddressOf(regular), oidtest.Address())
require.NoError(t, err)
exists, err := metaExists(db, object.AddressOf(regular))
diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go
index a1351cb6f..43933d12d 100644
--- a/pkg/local_object_storage/metabase/expired.go
+++ b/pkg/local_object_storage/metabase/expired.go
@@ -2,11 +2,12 @@ package meta
import (
"context"
- "encoding/binary"
"errors"
+ "fmt"
"strconv"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -16,8 +17,6 @@ import (
"go.opentelemetry.io/otel/trace"
)
-var errInvalidEpochValueLength = errors.New("could not parse expiration epoch: invalid data length")
-
// FilterExpired return expired items from addresses.
// Address considered expired if metabase does contain information about expiration and
// expiration epoch is less than epoch.
@@ -58,14 +57,33 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A
default:
}
- expired, err := selectExpiredObjects(tx, epoch, containerID, objectIDs)
+ expiredNeoFS, err := selectExpiredObjectIDs(tx, objectV2.SysAttributeExpEpochNeoFS, epoch, containerID, objectIDs)
if err != nil {
return err
}
- result = append(result, expired...)
+
+ expiredSys, err := selectExpiredObjectIDs(tx, objectV2.SysAttributeExpEpoch, epoch, containerID, objectIDs)
+ if err != nil {
+ return err
+ }
+
+ for _, o := range expiredNeoFS {
+ var a oid.Address
+ a.SetContainer(containerID)
+ a.SetObject(o)
+ result = append(result, a)
+ }
+
+ for _, o := range expiredSys {
+ var a oid.Address
+ a.SetContainer(containerID)
+ a.SetObject(o)
+ result = append(result, a)
+ }
}
return nil
})
+
if err != nil {
return nil, metaerr.Wrap(err)
}
@@ -73,41 +91,76 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A
return result, nil
}
-func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
- return isExpiredWithCache(nil, tx, addr, currEpoch)
+func isExpiredWithAttribute(tx *bbolt.Tx, attr string, addr oid.Address, currEpoch uint64) bool {
+ // bucket with objects that have expiration attr
+ attrKey := make([]byte, bucketKeySize+len(attr))
+ expirationBucket := tx.Bucket(attributeBucketName(addr.Container(), attr, attrKey))
+ if expirationBucket != nil {
+ // bucket that contains objects that expire in the current epoch
+ prevEpochBkt := expirationBucket.Bucket([]byte(strconv.FormatUint(currEpoch-1, 10)))
+ if prevEpochBkt != nil {
+ rawOID := objectKey(addr.Object(), make([]byte, objectKeySize))
+ if prevEpochBkt.Get(rawOID) != nil {
+ return true
+ }
+ }
+ }
+
+ return false
}
-func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
- b := getExpiredBucket(bc, tx, addr.Container())
- if b == nil {
- return false, nil
+func selectExpiredObjectIDs(tx *bbolt.Tx, attr string, epoch uint64, containerID cid.ID, objectIDs []oid.ID) ([]oid.ID, error) {
+ result := make([]oid.ID, 0)
+ notResolved := make(map[oid.ID]struct{})
+ for _, oid := range objectIDs {
+ notResolved[oid] = struct{}{}
}
- key := make([]byte, objectKeySize)
- addr.Object().Encode(key)
- val := b.Get(key)
- if len(val) == 0 {
- return false, nil
- }
- if len(val) != epochSize {
- return false, errInvalidEpochValueLength
- }
- expEpoch := binary.LittleEndian.Uint64(val)
- return expEpoch < currEpoch, nil
-}
-func selectExpiredObjects(tx *bbolt.Tx, epoch uint64, containerID cid.ID, objectIDs []oid.ID) ([]oid.Address, error) {
- result := make([]oid.Address, 0)
- var addr oid.Address
- addr.SetContainer(containerID)
- for _, objID := range objectIDs {
- addr.SetObject(objID)
- expired, err := isExpired(tx, addr, epoch)
+ expiredBuffer := make([]oid.ID, 0)
+ objectKeyBuffer := make([]byte, objectKeySize)
+
+ expirationBucketKey := make([]byte, bucketKeySize+len(attr))
+ expirationBucket := tx.Bucket(attributeBucketName(containerID, attr, expirationBucketKey))
+ if expirationBucket == nil {
+ return result, nil // all not expired
+ }
+
+ err := expirationBucket.ForEach(func(epochExpBucketKey, _ []byte) error {
+ bucketExpiresAfter, err := strconv.ParseUint(string(epochExpBucketKey), 10, 64)
if err != nil {
- return nil, err
+ return fmt.Errorf("could not parse expiration epoch: %w", err)
+ } else if bucketExpiresAfter >= epoch {
+ return nil
}
- if expired {
- result = append(result, addr)
+
+ epochExpirationBucket := expirationBucket.Bucket(epochExpBucketKey)
+ if epochExpirationBucket == nil {
+ return nil
}
+
+ expiredBuffer = expiredBuffer[:0]
+ for oid := range notResolved {
+ key := objectKey(oid, objectKeyBuffer)
+ if epochExpirationBucket.Get(key) != nil {
+ expiredBuffer = append(expiredBuffer, oid)
+ }
+ }
+
+ for _, oid := range expiredBuffer {
+ delete(notResolved, oid)
+ result = append(result, oid)
+ }
+
+ if len(notResolved) == 0 {
+ return errBreakBucketForEach
+ }
+
+ return nil
+ })
+
+ if err != nil && !errors.Is(err, errBreakBucketForEach) {
+ return nil, err
}
+
return result, nil
}
diff --git a/pkg/local_object_storage/metabase/expired_test.go b/pkg/local_object_storage/metabase/expired_test.go
index 495c1eee7..9a6bcc5db 100644
--- a/pkg/local_object_storage/metabase/expired_test.go
+++ b/pkg/local_object_storage/metabase/expired_test.go
@@ -13,7 +13,6 @@ import (
func TestDB_SelectExpired(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
containerID1 := cidtest.ID()
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index 821810c09..d18331a3d 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -88,16 +88,8 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
}
func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
- return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch)
-}
-
-func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
if checkStatus {
- st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
- if err != nil {
- return nil, err
- }
- switch st {
+ switch objectStatus(tx, addr, currEpoch) {
case 1:
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
case 2:
@@ -113,15 +105,9 @@ func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key
bucketName := make([]byte, bucketKeySize)
// check in primary index
- if b := getPrimaryBucket(bc, tx, cnr); b != nil {
- if data := b.Get(key); len(data) != 0 {
- return obj, obj.Unmarshal(data)
- }
- }
-
- data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
+ data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key)
if len(data) != 0 {
- return nil, getECInfoError(tx, cnr, data)
+ return obj, obj.Unmarshal(data)
}
// if not found then check in tombstone index
@@ -169,29 +155,17 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
- var data []byte
- for i := 0; i < len(relativeLst) && len(data) == 0; i++ {
- virtualOID := relativeLst[len(relativeLst)-i-1]
- data = getFromBucket(tx, primaryBucketName(cnr, bucketName), virtualOID)
- }
-
- if len(data) == 0 {
- // check if any of the relatives is an EC object
- for _, relative := range relativeLst {
- data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), relative)
- if len(data) > 0 {
- // we can't return object headers, but can return error,
- // so assembler can try to assemble complex object
- return nil, getSplitInfoError(tx, cnr, key)
- }
- }
- }
+ // pick last item, for now there is not difference which address to pick
+ // but later list might be sorted so first or last value can be more
+ // prioritized to choose
+ virtualOID := relativeLst[len(relativeLst)-1]
+ data := getFromBucket(tx, primaryBucketName(cnr, bucketName), virtualOID)
child := objectSDK.New()
err = child.Unmarshal(data)
if err != nil {
- return nil, fmt.Errorf("unmarshal child with parent: %w", err)
+ return nil, fmt.Errorf("can't unmarshal child with parent: %w", err)
}
par := child.Parent()
@@ -211,28 +185,3 @@ func getSplitInfoError(tx *bbolt.Tx, cnr cid.ID, key []byte) error {
return logicerr.Wrap(new(apistatus.ObjectNotFound))
}
-
-func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error {
- keys, err := decodeList(data)
- if err != nil {
- return err
- }
- ecInfo := objectSDK.NewECInfo()
- for _, key := range keys {
- // check in primary index
- objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key)
- if len(objData) != 0 {
- obj := objectSDK.New()
- if err := obj.Unmarshal(objData); err != nil {
- return err
- }
- chunk := objectSDK.ECChunk{}
- id, _ := obj.ID()
- chunk.SetID(id)
- chunk.Index = obj.ECHeader().Index()
- chunk.Total = obj.ECHeader().Total()
- ecInfo.AddChunk(chunk)
- }
- }
- return logicerr.Wrap(objectSDK.NewECInfoError(ecInfo))
-}
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index 98c428410..98a4bd960 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -3,7 +3,6 @@ package meta_test
import (
"bytes"
"context"
- "errors"
"fmt"
"os"
"runtime"
@@ -16,16 +15,13 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestDB_Get(t *testing.T) {
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
raw := testutil.GenerateObject()
@@ -112,46 +108,11 @@ func TestDB_Get(t *testing.T) {
require.True(t, binaryEqual(child.CutPayload(), newChild))
})
- t.Run("put erasure-coded object", func(t *testing.T) {
- cnr := cidtest.ID()
- virtual := testutil.GenerateObjectWithCID(cnr)
- c, err := erasurecode.NewConstructor(3, 1)
- require.NoError(t, err)
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- parts, err := c.Split(virtual, &pk.PrivateKey)
- require.NoError(t, err)
- for _, part := range parts {
- err = putBig(db, part)
- var eiError *objectSDK.ECInfoError
- if err != nil && !errors.As(err, &eiError) {
- require.NoError(t, err)
- }
- }
- _, err = metaGet(db, object.AddressOf(virtual), true)
- var eiError *objectSDK.ECInfoError
- require.ErrorAs(t, err, &eiError)
- require.Equal(t, len(eiError.ECInfo().Chunks), len(parts))
- for _, chunk := range eiError.ECInfo().Chunks {
- var found bool
- for _, part := range parts {
- partID, _ := part.ID()
- var chunkID oid.ID
- require.NoError(t, chunkID.ReadFromV2(chunk.ID))
- if chunkID.Equals(partID) {
- found = true
- }
- }
- if !found {
- require.Fail(t, "chunk not found")
- }
- }
- })
-
t.Run("get removed object", func(t *testing.T) {
obj := oidtest.Address()
+ ts := oidtest.Address()
- require.NoError(t, metaInhume(db, obj, oidtest.ID()))
+ require.NoError(t, metaInhume(db, obj, ts))
_, err := metaGet(db, obj, false)
require.True(t, client.IsErrObjectAlreadyRemoved(err))
@@ -221,7 +182,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
)
addrs := make([]oid.Address, 0, numOfObj)
- for range numOfObj {
+ for i := 0; i < numOfObj; i++ {
raw := testutil.GenerateObject()
addrs = append(addrs, object.AddressOf(raw))
@@ -233,7 +194,6 @@ func benchmarkGet(b *testing.B, numOfObj int) {
}
db, addrs := prepareDb(runtime.NumCPU())
- defer func() { require.NoError(b, db.Close(context.Background())) }()
b.Run("parallel", func(b *testing.B) {
b.ReportAllocs()
@@ -253,14 +213,14 @@ func benchmarkGet(b *testing.B, numOfObj int) {
})
})
- require.NoError(b, db.Close(context.Background()))
+ require.NoError(b, db.Close())
require.NoError(b, os.RemoveAll(b.Name()))
db, addrs = prepareDb(1)
b.Run("serial", func(b *testing.B) {
b.ReportAllocs()
- for i := range b.N {
+ for i := 0; i < b.N; i++ {
var getPrm meta.GetPrm
getPrm.SetAddress(addrs[i%len(addrs)])
diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go
index 2f23d424c..df9a3d302 100644
--- a/pkg/local_object_storage/metabase/graveyard.go
+++ b/pkg/local_object_storage/metabase/graveyard.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
)
@@ -129,7 +128,7 @@ func (g *GraveyardIterationPrm) SetHandler(h TombstonedHandler) {
// Note: if offset is not found in db, iteration starts
// from the element that WOULD BE the following after the
// offset if offset was presented. That means that it is
-// safe to delete offset element and pass it to the
+// safe to delete offset element and pass if to the
// iteration once again: iteration would start from the
// next element.
//
@@ -177,7 +176,7 @@ type gcHandler struct {
func (g gcHandler) handleKV(k, _ []byte) error {
o, err := garbageFromKV(k)
if err != nil {
- return fmt.Errorf("parse garbage object: %w", err)
+ return fmt.Errorf("could not parse garbage object: %w", err)
}
return g.h(o)
@@ -190,7 +189,7 @@ type graveyardHandler struct {
func (g graveyardHandler) handleKV(k, v []byte) error {
o, err := graveFromKV(k, v)
if err != nil {
- return fmt.Errorf("parse grave: %w", err)
+ return fmt.Errorf("could not parse grave: %w", err)
}
return g.h(o)
@@ -240,7 +239,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address)
func garbageFromKV(k []byte) (res GarbageObject, err error) {
err = decodeAddressFromKey(&res.addr, k)
if err != nil {
- err = fmt.Errorf("parse address: %w", err)
+ err = fmt.Errorf("could not parse address: %w", err)
}
return
@@ -256,58 +255,46 @@ func graveFromKV(k, v []byte) (res TombstonedObject, err error) {
return
}
-// InhumeTombstones deletes tombstoned objects from the
+// DropGraves deletes tombstoned objects from the
// graveyard bucket.
//
// Returns any error appeared during deletion process.
-func (db *DB) InhumeTombstones(ctx context.Context, tss []TombstonedObject) (InhumeRes, error) {
+func (db *DB) DropGraves(ctx context.Context, tss []TombstonedObject) error {
var (
startedAt = time.Now()
success = false
)
defer func() {
- db.metrics.AddMethodDuration("InhumeTombstones", time.Since(startedAt), success)
+ db.metrics.AddMethodDuration("DropGraves", time.Since(startedAt), success)
}()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.InhumeTombstones")
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.DropGraves")
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
if db.mode.NoMetabase() {
- return InhumeRes{}, ErrDegradedMode
+ return ErrDegradedMode
} else if db.mode.ReadOnly() {
- return InhumeRes{}, ErrReadOnlyMode
+ return ErrReadOnlyMode
}
buf := make([]byte, addressKeySize)
- prm := InhumePrm{forceRemoval: true}
- currEpoch := db.epochState.CurrentEpoch()
- var res InhumeRes
-
- err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
- res = InhumeRes{inhumedByCnrID: make(map[cid.ID]ObjectCounters)}
-
- garbageBKT := tx.Bucket(garbageBucketName)
- graveyardBKT := tx.Bucket(graveyardBucketName)
-
- bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
- if err != nil {
- return err
+ return db.boltDB.Update(func(tx *bbolt.Tx) error {
+ bkt := tx.Bucket(graveyardBucketName)
+ if bkt == nil {
+ return nil
}
- for i := range tss {
- if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, tss[i].Tombstone(), buf, currEpoch, prm, &res); err != nil {
- return err
- }
- if err := graveyardBKT.Delete(addressKey(tss[i].Address(), buf)); err != nil {
+ for _, ts := range tss {
+ err := bkt.Delete(addressKey(ts.Address(), buf))
+ if err != nil {
return err
}
}
return nil
})
- return res, err
}
diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go
index ebadecc04..7476608f2 100644
--- a/pkg/local_object_storage/metabase/graveyard_test.go
+++ b/pkg/local_object_storage/metabase/graveyard_test.go
@@ -7,9 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -17,7 +14,6 @@ import (
func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
var counter int
var iterGravePRM meta.GraveyardIterationPrm
@@ -44,7 +40,6 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
func TestDB_Iterate_OffsetNotFound(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
obj1 := testutil.GenerateObject()
obj2 := testutil.GenerateObject()
@@ -115,14 +110,12 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
func TestDB_IterateDeletedObjects(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
- cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := testutil.GenerateObjectWithCID(cnr)
- obj2 := testutil.GenerateObjectWithCID(cnr)
- obj3 := testutil.GenerateObjectWithCID(cnr)
- obj4 := testutil.GenerateObjectWithCID(cnr)
+ obj1 := testutil.GenerateObject()
+ obj2 := testutil.GenerateObject()
+ obj3 := testutil.GenerateObject()
+ obj4 := testutil.GenerateObject()
var err error
@@ -142,7 +135,6 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
- addrTombstone.SetContainer(cnr)
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
@@ -204,14 +196,12 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
- cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := testutil.GenerateObjectWithCID(cnr)
- obj2 := testutil.GenerateObjectWithCID(cnr)
- obj3 := testutil.GenerateObjectWithCID(cnr)
- obj4 := testutil.GenerateObjectWithCID(cnr)
+ obj1 := testutil.GenerateObject()
+ obj2 := testutil.GenerateObject()
+ obj3 := testutil.GenerateObject()
+ obj4 := testutil.GenerateObject()
var err error
@@ -229,7 +219,6 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
- addrTombstone.SetContainer(cnr)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(
@@ -305,7 +294,6 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
func TestDB_IterateOverGarbage_Offset(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
// generate and put 4 objects
obj1 := testutil.GenerateObject()
@@ -395,14 +383,12 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
require.False(t, iWasCalled)
}
-func TestDB_InhumeTombstones(t *testing.T) {
+func TestDB_DropGraves(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
- cnr := cidtest.ID()
// generate and put 2 objects
- obj1 := testutil.GenerateObjectWithCID(cnr)
- obj2 := testutil.GenerateObjectWithCID(cnr)
+ obj1 := testutil.GenerateObject()
+ obj2 := testutil.GenerateObject()
var err error
@@ -412,20 +398,8 @@ func TestDB_InhumeTombstones(t *testing.T) {
err = putBig(db, obj2)
require.NoError(t, err)
- id1, _ := obj1.ID()
- id2, _ := obj2.ID()
- ts := objectSDK.NewTombstone()
- ts.SetMembers([]oid.ID{id1, id2})
- objTs := objectSDK.New()
- objTs.SetContainerID(cnr)
- objTs.SetType(objectSDK.TypeTombstone)
-
- data, _ := ts.Marshal()
- objTs.SetPayload(data)
- require.NoError(t, objectSDK.CalculateAndSetID(objTs))
- require.NoError(t, putBig(db, objTs))
-
- addrTombstone := object.AddressOf(objTs)
+ // inhume with tombstone
+ addrTombstone := oidtest.Address()
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
@@ -448,11 +422,8 @@ func TestDB_InhumeTombstones(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 2, counter)
- res, err := db.InhumeTombstones(context.Background(), buriedTS)
+ err = db.DropGraves(context.Background(), buriedTS)
require.NoError(t, err)
- require.EqualValues(t, 1, res.LogicInhumed())
- require.EqualValues(t, 0, res.UserInhumed())
- require.EqualValues(t, map[cid.ID]meta.ObjectCounters{cnr: {Logic: 1}}, res.InhumedByCnrID())
counter = 0
iterGravePRM.SetHandler(func(_ meta.TombstonedObject) error {
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 76018fb61..fe8b8873e 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -7,7 +7,6 @@ import (
"fmt"
"time"
- storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -31,34 +30,21 @@ type InhumePrm struct {
// DeletionInfo contains details on deleted object.
type DeletionInfo struct {
- Size uint64
- CID cid.ID
- IsUser bool
+ Size uint64
+ CID cid.ID
}
// InhumeRes encapsulates results of Inhume operation.
type InhumeRes struct {
- deletedLockObj []oid.Address
- logicInhumed uint64
- userInhumed uint64
- inhumedByCnrID map[cid.ID]ObjectCounters
- deletionDetails []DeletionInfo
+ deletedLockObj []oid.Address
+ availableImhumed uint64
+ deletionDetails []DeletionInfo
}
-// LogicInhumed return number of logic object
+// AvailableInhumed return number of available object
// that have been inhumed.
-func (i InhumeRes) LogicInhumed() uint64 {
- return i.logicInhumed
-}
-
-func (i InhumeRes) UserInhumed() uint64 {
- return i.userInhumed
-}
-
-// InhumedByCnrID return number of object
-// that have been inhumed by container ID.
-func (i InhumeRes) InhumedByCnrID() map[cid.ID]ObjectCounters {
- return i.inhumedByCnrID
+func (i InhumeRes) AvailableInhumed() uint64 {
+ return i.availableImhumed
}
// DeletedLockObjects returns deleted object of LOCK
@@ -82,32 +68,11 @@ func (i InhumeRes) GetDeletionInfoByIndex(target int) DeletionInfo {
// StoreDeletionInfo stores size of deleted object and associated container ID
// in corresponding arrays.
-func (i *InhumeRes) storeDeletionInfo(containerID cid.ID, deletedSize uint64, isUser bool) {
+func (i *InhumeRes) storeDeletionInfo(containerID cid.ID, deletedSize uint64) {
i.deletionDetails = append(i.deletionDetails, DeletionInfo{
- Size: deletedSize,
- CID: containerID,
- IsUser: isUser,
+ Size: deletedSize,
+ CID: containerID,
})
- i.logicInhumed++
- if isUser {
- i.userInhumed++
- }
-
- if v, ok := i.inhumedByCnrID[containerID]; ok {
- v.Logic++
- if isUser {
- v.User++
- }
- i.inhumedByCnrID[containerID] = v
- } else {
- v = ObjectCounters{
- Logic: 1,
- }
- if isUser {
- v.User = 1
- }
- i.inhumedByCnrID[containerID] = v
- }
}
// SetAddresses sets a list of object addresses that should be inhumed.
@@ -143,20 +108,6 @@ func (p *InhumePrm) SetForceGCMark() {
p.forceRemoval = true
}
-func (p *InhumePrm) validate() error {
- if p == nil {
- return nil
- }
- if p.tomb != nil {
- for _, addr := range p.target {
- if addr.Container() != p.tomb.Container() {
- return fmt.Errorf("object %s and tombstone %s have different container ID", addr, p.tomb)
- }
- }
- }
- return nil
-}
-
var errBreakBucketForEach = errors.New("bucket ForEach break")
// ErrLockObjectRemoval is returned when inhume operation is being
@@ -171,7 +122,7 @@ var ErrLockObjectRemoval = logicerr.New("lock object removal")
//
// NOTE: Marks any object with GC mark (despite any prohibitions on operations
// with that object) if WithForceGCMark option has been provided.
-func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
+func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
var (
startedAt = time.Now()
success = false
@@ -185,31 +136,17 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
- if err := prm.validate(); err != nil {
- return InhumeRes{}, err
- }
-
if db.mode.NoMetabase() {
return InhumeRes{}, ErrDegradedMode
} else if db.mode.ReadOnly() {
return InhumeRes{}, ErrReadOnlyMode
}
- res := InhumeRes{
- inhumedByCnrID: make(map[cid.ID]ObjectCounters),
- }
currEpoch := db.epochState.CurrentEpoch()
- err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ err = db.boltDB.Update(func(tx *bbolt.Tx) error {
return db.inhumeTx(tx, currEpoch, prm, &res)
})
success = err == nil
- if success {
- for _, addr := range prm.target {
- storagelog.Write(ctx, db.log,
- storagelog.AddressField(addr),
- storagelog.OpField("metabase INHUME"))
- }
- }
return res, metaerr.Wrap(err)
}
@@ -217,139 +154,77 @@ func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes
garbageBKT := tx.Bucket(garbageBucketName)
graveyardBKT := tx.Bucket(graveyardBucketName)
- bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm)
+ bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, &prm)
if err != nil {
return err
}
buf := make([]byte, addressKeySize)
for i := range prm.target {
- if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, prm.target[i], buf, epoch, prm, res); err != nil {
- return err
- }
- }
+ id := prm.target[i].Object()
+ cnr := prm.target[i].Container()
- return db.applyInhumeResToCounters(tx, res)
-}
-
-func (db *DB) inhumeTxSingle(bkt *bbolt.Bucket, value []byte, graveyardBKT, garbageBKT *bbolt.Bucket, addr oid.Address, buf []byte, epoch uint64, prm InhumePrm, res *InhumeRes) error {
- id := addr.Object()
- cnr := addr.Container()
- tx := bkt.Tx()
-
- // prevent locked objects to be inhumed
- if !prm.forceRemoval && objectLocked(tx, cnr, id) {
- return new(apistatus.ObjectLocked)
- }
-
- var lockWasChecked bool
-
- // prevent lock objects to be inhumed
- // if `Inhume` was called not with the
- // `WithForceGCMark` option
- if !prm.forceRemoval {
- if isLockObject(tx, cnr, id) {
- return ErrLockObjectRemoval
+ // prevent locked objects to be inhumed
+ if !prm.forceRemoval && objectLocked(tx, cnr, id) {
+ return new(apistatus.ObjectLocked)
}
- lockWasChecked = true
- }
+ var lockWasChecked bool
- obj, err := db.get(tx, addr, buf, false, true, epoch)
- targetKey := addressKey(addr, buf)
- var ecErr *objectSDK.ECInfoError
- if err == nil {
- err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
- if err != nil {
- return err
- }
- } else if errors.As(err, &ecErr) {
- err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
- if err != nil {
- return err
- }
- }
+ // prevent lock objects to be inhumed
+ // if `Inhume` was called not with the
+ // `WithForceGCMark` option
+ if !prm.forceRemoval {
+ if isLockObject(tx, cnr, id) {
+ return ErrLockObjectRemoval
+ }
- if prm.tomb != nil {
- var isTomb bool
- isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
- if err != nil {
- return err
+ lockWasChecked = true
}
- if isTomb {
- return nil
- }
- }
-
- // consider checking if target is already in graveyard?
- err = bkt.Put(targetKey, value)
- if err != nil {
- return err
- }
-
- if prm.lockObjectHandling {
- // do not perform lock check if
- // it was already called
- if lockWasChecked {
- // inhumed object is not of
- // the LOCK type
- return nil
- }
-
- if isLockObject(tx, cnr, id) {
- res.deletedLockObj = append(res.deletedLockObj, addr)
- }
- }
- return nil
-}
-
-func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *InhumeRes,
- garbageBKT *bbolt.Bucket, graveyardBKT *bbolt.Bucket,
- ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte,
-) error {
- for _, chunk := range ecInfo.Chunks {
- chunkBuf := make([]byte, addressKeySize)
- var chunkAddr oid.Address
- chunkAddr.SetContainer(cnr)
- var chunkID oid.ID
- err := chunkID.ReadFromV2(chunk.ID)
- if err != nil {
- return err
- }
- chunkAddr.SetObject(chunkID)
- chunkObj, err := db.get(tx, chunkAddr, chunkBuf, false, true, epoch)
- if err != nil {
- return err
- }
- chunkKey := addressKey(chunkAddr, chunkBuf)
- err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, chunkKey, cnr, chunkObj, res)
- if err != nil {
- return err
- }
- if tomb != nil {
- _, err = db.markAsGC(graveyardBKT, garbageBKT, chunkKey)
+ obj, err := db.get(tx, prm.target[i], buf, false, true, epoch)
+ targetKey := addressKey(prm.target[i], buf)
+ if err == nil {
+ err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
if err != nil {
return err
}
}
- err = targetBucket.Put(chunkKey, value)
+
+ if prm.tomb != nil {
+ var isTomb bool
+ isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
+ if err != nil {
+ return err
+ }
+
+ if isTomb {
+ continue
+ }
+ }
+
+ // consider checking if target is already in graveyard?
+ err = bkt.Put(targetKey, value)
if err != nil {
return err
}
- }
- return nil
-}
-func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
- if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil {
- return err
- }
- if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil {
- return err
+ if prm.lockObjectHandling {
+ // do not perform lock check if
+ // it was already called
+ if lockWasChecked {
+ // inhumed object is not of
+ // the LOCK type
+ continue
+ }
+
+ if isLockObject(tx, cnr, id) {
+ res.deletedLockObj = append(res.deletedLockObj, prm.target[i])
+ }
+ }
}
- return db.updateContainerCounter(tx, res.inhumedByCnrID, false)
+ return db.updateCounter(tx, logical, res.availableImhumed, false)
}
// getInhumeTargetBucketAndValue return target bucket to store inhume result and value that will be put in the bucket.
@@ -362,7 +237,7 @@ func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error {
// 1. tombstone address if Inhume was called with
// a Tombstone
// 2. zeroValue if Inhume was called with a GC mark
-func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
+func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm *InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
if prm.tomb != nil {
targetBucket = graveyardBKT
tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize))
@@ -373,7 +248,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
if data != nil {
err := targetBucket.Delete(tombKey)
if err != nil {
- return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err)
+ return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err)
}
}
@@ -385,8 +260,11 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
return targetBucket, value, nil
}
-func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte) (bool, error) {
- targetIsTomb := isTomb(graveyardBKT, addressKey)
+func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool, error) {
+ targetIsTomb, err := isTomb(graveyardBKT, key)
+ if err != nil {
+ return false, err
+ }
// do not add grave if target is a tombstone
if targetIsTomb {
@@ -395,13 +273,14 @@ func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte
// if tombstone appears object must be
// additionally marked with GC
- return false, garbageBKT.Put(addressKey, zeroValue)
+ return false, garbageBKT.Put(key, zeroValue)
}
func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Bucket, targetKey []byte, cnr cid.ID, obj *objectSDK.Object, res *InhumeRes) error {
containerID, _ := obj.ContainerID()
if inGraveyardWithKey(targetKey, graveyardBKT, garbageBKT) == 0 {
- res.storeDeletionInfo(containerID, obj.PayloadSize(), IsUserObject(obj))
+ res.availableImhumed++
+ res.storeDeletionInfo(containerID, obj.PayloadSize())
}
// if object is stored, and it is regular object then update bucket
@@ -415,21 +294,25 @@ func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Buc
return nil
}
-func isTomb(graveyardBucket *bbolt.Bucket, addressKey []byte) bool {
+func isTomb(graveyardBucket *bbolt.Bucket, key []byte) (bool, error) {
targetIsTomb := false
// iterate over graveyard and check if target address
// is the address of tombstone in graveyard.
- // tombstone must have the same container ID as key.
- c := graveyardBucket.Cursor()
- containerPrefix := addressKey[:cidSize]
- for k, v := c.Seek(containerPrefix); k != nil && bytes.HasPrefix(k, containerPrefix); k, v = c.Next() {
+ err := graveyardBucket.ForEach(func(k, v []byte) error {
// check if graveyard has record with key corresponding
// to tombstone address (at least one)
- targetIsTomb = bytes.Equal(v, addressKey)
+ targetIsTomb = bytes.Equal(v, key)
+
if targetIsTomb {
- break
+ // break bucket iterator
+ return errBreakBucketForEach
}
+
+ return nil
+ })
+ if err != nil && !errors.Is(err, errBreakBucketForEach) {
+ return false, err
}
- return targetIsTomb
+ return targetIsTomb, nil
}
diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go
deleted file mode 100644
index 180713287..000000000
--- a/pkg/local_object_storage/metabase/inhume_ec_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package meta
-
-import (
- "context"
- "path/filepath"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestInhumeECObject(t *testing.T) {
- t.Parallel()
-
- db := New(
- WithPath(filepath.Join(t.TempDir(), "metabase")),
- WithPermissions(0o600),
- WithEpochState(epochState{uint64(12)}),
- )
-
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
- ecChunk := oidtest.ID()
- ecChunk2 := oidtest.ID()
- ecParent := oidtest.ID()
- tombstoneID := oidtest.ID()
-
- chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetID(ecChunk)
- chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
- chunkObj.SetPayloadSize(uint64(5))
- chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0))
-
- chunkObj2 := testutil.GenerateObjectWithCID(cnr)
- chunkObj2.SetID(ecChunk2)
- chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
- chunkObj2.SetPayloadSize(uint64(10))
- chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 1, 3, []byte{}, 0))
-
- // put object with EC
-
- var prm PutPrm
- prm.SetObject(chunkObj)
- prm.SetStorageID([]byte("0/0"))
- _, err := db.Put(context.Background(), prm)
- require.NoError(t, err)
-
- prm.SetObject(chunkObj2)
- _, err = db.Put(context.Background(), prm)
- require.NoError(t, err)
-
- var ecChunkAddress oid.Address
- ecChunkAddress.SetContainer(cnr)
- ecChunkAddress.SetObject(ecChunk)
-
- var ecParentAddress oid.Address
- ecParentAddress.SetContainer(cnr)
- ecParentAddress.SetObject(ecParent)
-
- var chunkObjectAddress oid.Address
- chunkObjectAddress.SetContainer(cnr)
- chunkObjectAddress.SetObject(ecChunk)
-
- var getPrm GetPrm
-
- getPrm.SetAddress(ecChunkAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.NoError(t, err)
-
- var ecInfoError *objectSDK.ECInfoError
- getPrm.SetAddress(ecParentAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, &ecInfoError)
- require.True(t, len(ecInfoError.ECInfo().Chunks) == 2 &&
- ecInfoError.ECInfo().Chunks[0].Index == 0 &&
- ecInfoError.ECInfo().Chunks[0].Total == 3)
-
- // inhume Chunk
- var inhumePrm InhumePrm
- var tombAddress oid.Address
- inhumePrm.SetAddresses(chunkObjectAddress)
- res, err := db.Inhume(context.Background(), inhumePrm)
- require.NoError(t, err)
- require.True(t, len(res.deletionDetails) == 1)
- require.True(t, res.deletionDetails[0].Size == 5)
-
- // inhume EC parent (like Delete does)
- tombAddress.SetContainer(cnr)
- tombAddress.SetObject(tombstoneID)
- inhumePrm.SetAddresses(ecParentAddress)
- inhumePrm.SetTombstoneAddress(tombAddress)
- res, err = db.Inhume(context.Background(), inhumePrm)
- require.NoError(t, err)
- // Previously deleted chunk shouldn't be in the details, because it is marked as garbage
- require.True(t, len(res.deletionDetails) == 1)
- require.True(t, res.deletionDetails[0].Size == 10)
-
- getPrm.SetAddress(ecParentAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
-
- getPrm.SetAddress(ecChunkAddress)
- _, err = db.Get(context.Background(), getPrm)
- require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
-}
diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go
index 786d10396..378e38e79 100644
--- a/pkg/local_object_storage/metabase/inhume_test.go
+++ b/pkg/local_object_storage/metabase/inhume_test.go
@@ -9,7 +9,6 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -17,15 +16,16 @@ import (
func TestDB_Inhume(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
raw := testutil.GenerateObject()
testutil.AddAttribute(raw, "foo", "bar")
+ tombstoneID := oidtest.Address()
+
err := putBig(db, raw)
require.NoError(t, err)
- err = metaInhume(db, object.AddressOf(raw), oidtest.ID())
+ err = metaInhume(db, object.AddressOf(raw), tombstoneID)
require.NoError(t, err)
_, err = metaExists(db, object.AddressOf(raw))
@@ -37,25 +37,17 @@ func TestDB_Inhume(t *testing.T) {
func TestInhumeTombOnTomb(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
var (
err error
- cnr = cidtest.ID()
addr1 = oidtest.Address()
addr2 = oidtest.Address()
addr3 = oidtest.Address()
- addr4 = oidtest.Address()
inhumePrm meta.InhumePrm
existsPrm meta.ExistsPrm
)
- addr1.SetContainer(cnr)
- addr2.SetContainer(cnr)
- addr3.SetContainer(cnr)
- addr4.SetContainer(cnr)
-
inhumePrm.SetAddresses(addr1)
inhumePrm.SetTombstoneAddress(addr2)
@@ -90,7 +82,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
require.True(t, client.IsErrObjectAlreadyRemoved(err))
inhumePrm.SetAddresses(addr1)
- inhumePrm.SetTombstoneAddress(addr4)
+ inhumePrm.SetTombstoneAddress(oidtest.Address())
// try to inhume addr1 (which is already a tombstone in graveyard)
_, err = db.Inhume(context.Background(), inhumePrm)
@@ -107,7 +99,6 @@ func TestInhumeTombOnTomb(t *testing.T) {
func TestInhumeLocked(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
locked := oidtest.Address()
@@ -123,13 +114,10 @@ func TestInhumeLocked(t *testing.T) {
require.ErrorAs(t, err, &e)
}
-func metaInhume(db *meta.DB, target oid.Address, tomb oid.ID) error {
+func metaInhume(db *meta.DB, target, tomb oid.Address) error {
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(target)
- var tombAddr oid.Address
- tombAddr.SetContainer(target.Container())
- tombAddr.SetObject(tomb)
- inhumePrm.SetTombstoneAddress(tombAddr)
+ inhumePrm.SetTombstoneAddress(tomb)
_, err := db.Inhume(context.Background(), inhumePrm)
return err
diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go
index 9cccd7dad..a1e21ef25 100644
--- a/pkg/local_object_storage/metabase/iterators.go
+++ b/pkg/local_object_storage/metabase/iterators.go
@@ -3,14 +3,17 @@ package meta
import (
"context"
"errors"
+ "fmt"
"strconv"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
@@ -76,43 +79,132 @@ func (db *DB) IterateExpired(ctx context.Context, epoch uint64, h ExpiredObjectH
}
func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler) error {
- b := tx.Bucket(expEpochToObjectBucketName)
- c := b.Cursor()
- for k, _ := c.First(); k != nil; k, _ = c.Next() {
- expiresAfter, cnr, obj, err := parseExpirationEpochKey(k)
+ err := tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
+ cidBytes := cidFromAttributeBucket(name, objectV2.SysAttributeExpEpoch)
+ if cidBytes == nil {
+ cidBytes = cidFromAttributeBucket(name, objectV2.SysAttributeExpEpochNeoFS)
+ if cidBytes == nil {
+ return nil
+ }
+ }
+
+ var cnrID cid.ID
+ err := cnrID.Decode(cidBytes)
if err != nil {
- return err
+ return fmt.Errorf("could not parse container ID of expired bucket: %w", err)
}
- // bucket keys ordered by epoch, no need to continue lookup
- if expiresAfter >= epoch {
- return nil
- }
- if objectLocked(tx, cnr, obj) {
- continue
- }
- var addr oid.Address
- addr.SetContainer(cnr)
- addr.SetObject(obj)
- objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
- err = h(&ExpiredObject{
- typ: firstIrregularObjectType(tx, cnr, objKey),
- addr: addr,
+
+ return b.ForEachBucket(func(expKey []byte) error {
+ bktExpired := b.Bucket(expKey)
+ expiresAfter, err := strconv.ParseUint(string(expKey), 10, 64)
+ if err != nil {
+ return fmt.Errorf("could not parse expiration epoch: %w", err)
+ } else if expiresAfter >= epoch {
+ return nil
+ }
+
+ return bktExpired.ForEach(func(idKey, _ []byte) error {
+ var id oid.ID
+
+ err = id.Decode(idKey)
+ if err != nil {
+ return fmt.Errorf("could not parse ID of expired object: %w", err)
+ }
+
+ // Ignore locked objects.
+ //
+ // To slightly optimize performance we can check only REGULAR objects
+ // (only they can be locked), but it's more reliable.
+ if objectLocked(tx, cnrID, id) {
+ return nil
+ }
+
+ var addr oid.Address
+ addr.SetContainer(cnrID)
+ addr.SetObject(id)
+
+ return h(&ExpiredObject{
+ typ: firstIrregularObjectType(tx, cnrID, idKey),
+ addr: addr,
+ })
+ })
})
- if err == nil {
- continue
- }
- if errors.Is(err, ErrInterruptIterator) {
- return nil
- }
- return err
+ })
+
+ if errors.Is(err, ErrInterruptIterator) {
+ err = nil
}
- return nil
+
+ return err
}
-func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) error) error {
+// IterateCoveredByTombstones iterates over all objects in DB which are covered
+// by tombstone with string address from tss. Locked objects are not included
+// (do not confuse with objects of type LOCK).
+//
+// If h returns ErrInterruptIterator, nil returns immediately.
+// Returns other errors of h directly.
+//
+// Does not modify tss.
+func (db *DB) IterateCoveredByTombstones(ctx context.Context, tss map[string]oid.Address, h func(oid.Address) error) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateCoveredByTombstones", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateCoveredByTombstones")
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ return db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateCoveredByTombstones(tx, tss, h)
+ })
+}
+
+func (db *DB) iterateCoveredByTombstones(tx *bbolt.Tx, tss map[string]oid.Address, h func(oid.Address) error) error {
+ bktGraveyard := tx.Bucket(graveyardBucketName)
+
+ err := bktGraveyard.ForEach(func(k, v []byte) error {
+ var addr oid.Address
+ if err := decodeAddressFromKey(&addr, v); err != nil {
+ return err
+ }
+ if _, ok := tss[addr.EncodeToString()]; ok {
+ var addr oid.Address
+
+ err := decodeAddressFromKey(&addr, k)
+ if err != nil {
+ return fmt.Errorf("could not parse address of the object under tombstone: %w", err)
+ }
+
+ if objectLocked(tx, addr.Container(), addr.Object()) {
+ return nil
+ }
+
+ return h(addr)
+ }
+
+ return nil
+ })
+
+ if errors.Is(err, ErrInterruptIterator) {
+ err = nil
+ }
+
+ return err
+}
+
+func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID) error) error {
var cid cid.ID
var oid oid.ID
- obj := objectSDK.New()
return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
b58CID, postfix := parseContainerIDWithPrefix(&cid, name)
@@ -129,8 +221,8 @@ func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) e
}
return b.ForEach(func(k, v []byte) error {
- if oid.Decode(k) == nil && obj.Unmarshal(v) == nil {
- return f(cid, oid, obj)
+ if oid.Decode(k) == nil {
+ return f(cid, oid)
}
return nil
diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go
index 4c9579965..034a931d2 100644
--- a/pkg/local_object_storage/metabase/iterators_test.go
+++ b/pkg/local_object_storage/metabase/iterators_test.go
@@ -5,10 +5,10 @@ import (
"strconv"
"testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
object2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -17,7 +17,6 @@ import (
func TestDB_IterateExpired(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
const epoch = 13
@@ -66,3 +65,59 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt
return object2.AddressOf(obj)
}
+
+func TestDB_IterateCoveredByTombstones(t *testing.T) {
+ db := newDB(t)
+
+ ts := oidtest.Address()
+ protected1 := oidtest.Address()
+ protected2 := oidtest.Address()
+ protectedLocked := oidtest.Address()
+ garbage := oidtest.Address()
+
+ var prm meta.InhumePrm
+ var err error
+
+ prm.SetAddresses(protected1, protected2, protectedLocked)
+ prm.SetTombstoneAddress(ts)
+
+ _, err = db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+
+ prm.SetAddresses(garbage)
+ prm.SetGCMark()
+
+ _, err = db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+
+ var handled []oid.Address
+
+ tss := map[string]oid.Address{
+ ts.EncodeToString(): ts,
+ }
+
+ err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error {
+ handled = append(handled, addr)
+ return nil
+ })
+ require.NoError(t, err)
+
+ require.Len(t, handled, 3)
+ require.Contains(t, handled, protected1)
+ require.Contains(t, handled, protected2)
+ require.Contains(t, handled, protectedLocked)
+
+ err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
+ require.NoError(t, err)
+
+ handled = handled[:0]
+
+ err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error {
+ handled = append(handled, addr)
+ return nil
+ })
+ require.NoError(t, err)
+
+ require.Len(t, handled, 2)
+ require.NotContains(t, handled, protectedLocked)
+}
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index 2a0bd7f6a..37a574a02 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -1,7 +1,6 @@
package meta
import (
- "bytes"
"context"
"time"
@@ -48,12 +47,12 @@ func (l *ListPrm) SetCursor(cursor *Cursor) {
// ListRes contains values returned from ListWithCursor operation.
type ListRes struct {
- addrList []objectcore.Info
+ addrList []objectcore.AddressWithType
cursor *Cursor
}
// AddressList returns addresses selected by ListWithCursor operation.
-func (l ListRes) AddressList() []objectcore.Info {
+func (l ListRes) AddressList() []objectcore.AddressWithType {
return l.addrList
}
@@ -62,37 +61,12 @@ func (l ListRes) Cursor() *Cursor {
return l.cursor
}
-// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
-type IterateOverContainersPrm struct {
- // Handler function executed upon containers in db.
- Handler func(context.Context, objectSDK.Type, cid.ID) error
-}
-
-// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
-type IterateOverObjectsInContainerPrm struct {
- // ObjectType type of objects to iterate over.
- ObjectType objectSDK.Type
- // ContainerID container for objects to iterate over.
- ContainerID cid.ID
- // Handler function executed upon objects in db.
- Handler func(context.Context, *objectcore.Info) error
-}
-
-// CountAliveObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
-type CountAliveObjectsInContainerPrm struct {
- // ObjectType type of objects to iterate over.
- ObjectType objectSDK.Type
- // ContainerID container for objects to iterate over.
- ContainerID cid.ID
-}
-
// ListWithCursor lists physical objects available in metabase starting from
-// cursor. Includes objects of all types. Does not include inhumed and expired
-// objects.
+// cursor. Includes objects of all types. Does not include inhumed objects.
// Use cursor value from response for consecutive requests.
//
// Returns ErrEndOfListing if there are no more objects to return or count
-// parameter is set to zero.
+// parameter set to zero.
func (db *DB) ListWithCursor(ctx context.Context, prm ListPrm) (res ListRes, err error) {
var (
startedAt = time.Now()
@@ -115,7 +89,7 @@ func (db *DB) ListWithCursor(ctx context.Context, prm ListPrm) (res ListRes, err
return res, ErrDegradedMode
}
- result := make([]objectcore.Info, 0, prm.count)
+ result := make([]objectcore.AddressWithType, 0, prm.count)
err = db.boltDB.View(func(tx *bbolt.Tx) error {
res.addrList, res.cursor, err = db.listWithCursor(tx, result, prm.count, prm.cursor)
@@ -125,10 +99,9 @@ func (db *DB) ListWithCursor(ctx context.Context, prm ListPrm) (res ListRes, err
return res, metaerr.Wrap(err)
}
-func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int, cursor *Cursor) ([]objectcore.Info, *Cursor, error) {
+func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.AddressWithType, count int, cursor *Cursor) ([]objectcore.AddressWithType, *Cursor, error) {
threshold := cursor == nil // threshold is a flag to ignore cursor
var bucketName []byte
- var err error
c := tx.Cursor()
name, _ := c.First()
@@ -139,11 +112,10 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,
var containerID cid.ID
var offset []byte
- bc := newBucketCache()
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
- rawAddr := make([]byte, cidSize, addressKeySize)
-
- currEpoch := db.epochState.CurrentEpoch()
+ var rawAddr = make([]byte, cidSize, addressKeySize)
loop:
for ; name != nil; name, _ = c.Next() {
@@ -168,11 +140,8 @@ loop:
bkt := tx.Bucket(name)
if bkt != nil {
copy(rawAddr, cidRaw)
- result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID,
- result, count, cursor, threshold, currEpoch)
- if err != nil {
- return nil, nil, err
- }
+ result, offset, cursor = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
+ result, count, cursor, threshold)
}
bucketName = name
if len(result) >= count {
@@ -187,7 +156,8 @@ loop:
if offset != nil {
// new slice is much faster but less memory efficient
// we need to copy, because offset exists during bbolt tx
- cursor.inBucketOffset = bytes.Clone(offset)
+ cursor.inBucketOffset = make([]byte, len(offset))
+ copy(cursor.inBucketOffset, offset)
}
if len(result) == 0 {
@@ -196,41 +166,41 @@ loop:
// new slice is much faster but less memory efficient
// we need to copy, because bucketName exists during bbolt tx
- cursor.bucketName = bytes.Clone(bucketName)
+ cursor.bucketName = make([]byte, len(bucketName))
+ copy(cursor.bucketName, bucketName)
return result, cursor, nil
}
// selectNFromBucket similar to selectAllFromBucket but uses cursor to find
// object to start selecting from. Ignores inhumed objects.
-func selectNFromBucket(
- bc *bucketCache,
- bkt *bbolt.Bucket, // main bucket
+func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
objType objectSDK.Type, // type of the objects stored in the main bucket
+ graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets
cidRaw []byte, // container ID prefix, optimization
cnt cid.ID, // container ID
- to []objectcore.Info, // listing result
+ to []objectcore.AddressWithType, // listing result
limit int, // stop listing at `limit` items in result
cursor *Cursor, // start from cursor object
threshold bool, // ignore cursor and start immediately
- currEpoch uint64,
-) ([]objectcore.Info, []byte, *Cursor, error) {
+) ([]objectcore.AddressWithType, []byte, *Cursor) {
if cursor == nil {
cursor = new(Cursor)
}
+ count := len(to)
c := bkt.Cursor()
- k, v := c.First()
+ k, _ := c.First()
offset := cursor.inBucketOffset
if !threshold {
c.Seek(offset)
- k, v = c.Next() // we are looking for objects _after_ the cursor
+ k, _ = c.Next() // we are looking for objects _after_ the cursor
}
- for ; k != nil; k, v = c.Next() {
- if len(to) >= limit {
+ for ; k != nil; k, _ = c.Next() {
+ if count >= limit {
break
}
@@ -240,43 +210,18 @@ func selectNFromBucket(
}
offset = k
- graveyardBkt := getGraveyardBucket(bc, bkt.Tx())
- garbageBkt := getGarbageBucket(bc, bkt.Tx())
if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
continue
}
- var o objectSDK.Object
- if err := o.Unmarshal(v); err != nil {
- return nil, nil, nil, err
- }
-
- expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
- if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) {
- continue
- }
-
- var isLinkingObj bool
- var ecInfo *objectcore.ECInfo
- if objType == objectSDK.TypeRegular {
- isLinkingObj = isLinkObject(&o)
- ecHeader := o.ECHeader()
- if ecHeader != nil {
- ecInfo = &objectcore.ECInfo{
- ParentID: ecHeader.Parent(),
- Index: ecHeader.Index(),
- Total: ecHeader.Total(),
- }
- }
- }
-
var a oid.Address
a.SetContainer(cnt)
a.SetObject(obj)
- to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
+ to = append(to, objectcore.AddressWithType{Address: a, Type: objType})
+ count++
}
- return to, offset, cursor, nil
+ return to, offset, cursor
}
func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte) {
@@ -292,211 +237,3 @@ func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte)
return rawID, name[0]
}
-
-// IterateOverContainers lists physical containers available in metabase starting from first.
-func (db *DB) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("IterateOverContainers", time.Since(startedAt), success)
- }()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverContainers",
- trace.WithAttributes(
- attribute.Bool("has_handler", prm.Handler != nil),
- ))
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- return db.iterateOverContainers(ctx, tx, prm)
- })
- success = err == nil
- return metaerr.Wrap(err)
-}
-
-func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm IterateOverContainersPrm) error {
- var containerID cid.ID
- for _, prefix := range [][]byte{{byte(primaryPrefix)}, {byte(lockersPrefix)}, {byte(tombstonePrefix)}} {
- c := tx.Cursor()
- for name, _ := c.Seek(prefix); name != nil && bytes.HasPrefix(name, prefix); name, _ = c.Next() {
- cidRaw, _ := parseContainerIDWithPrefix(&containerID, name)
- if cidRaw == nil {
- continue
- }
- var cnt cid.ID
- copy(cnt[:], containerID[:])
- var objType objectSDK.Type
- switch prefix[0] {
- case primaryPrefix:
- objType = objectSDK.TypeRegular
- case lockersPrefix:
- objType = objectSDK.TypeLock
- case tombstonePrefix:
- objType = objectSDK.TypeTombstone
- default:
- continue
- }
- err := prm.Handler(ctx, objType, cnt)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// IterateOverObjectsInContainer iterate over physical objects available in metabase starting from first.
-func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("IterateOverObjectsInContainer", time.Since(startedAt), success)
- }()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverObjectsInContainer",
- trace.WithAttributes(
- attribute.Bool("has_handler", prm.Handler != nil),
- ))
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- return db.iterateOverObjectsInContainer(ctx, tx, prm)
- })
- success = err == nil
- return metaerr.Wrap(err)
-}
-
-func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, prm IterateOverObjectsInContainerPrm) error {
- var prefix byte
- switch prm.ObjectType {
- case objectSDK.TypeRegular:
- prefix = primaryPrefix
- case objectSDK.TypeLock:
- prefix = lockersPrefix
- case objectSDK.TypeTombstone:
- prefix = tombstonePrefix
- default:
- return nil
- }
- bucketName := []byte{prefix}
- bucketName = append(bucketName, prm.ContainerID[:]...)
-
- bkt := tx.Bucket(bucketName)
- if bkt == nil {
- return nil
- }
- graveyardBkt := tx.Bucket(graveyardBucketName)
- garbageBkt := tx.Bucket(garbageBucketName)
- c := bkt.Cursor()
- k, v := c.First()
-
- for ; k != nil; k, v = c.Next() {
- var obj oid.ID
- if err := obj.Decode(k); err != nil {
- break
- }
-
- if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
- continue
- }
-
- var isLinkingObj bool
- var ecInfo *objectcore.ECInfo
- if prm.ObjectType == objectSDK.TypeRegular {
- var o objectSDK.Object
- if err := o.Unmarshal(v); err != nil {
- return err
- }
- isLinkingObj = isLinkObject(&o)
- ecHeader := o.ECHeader()
- if ecHeader != nil {
- ecInfo = &objectcore.ECInfo{
- ParentID: ecHeader.Parent(),
- Index: ecHeader.Index(),
- Total: ecHeader.Total(),
- }
- }
- }
-
- var a oid.Address
- a.SetContainer(prm.ContainerID)
- a.SetObject(obj)
- objInfo := objectcore.Info{Address: a, Type: prm.ObjectType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}
- err := prm.Handler(ctx, &objInfo)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// CountAliveObjectsInContainer count objects in bucket which aren't in graveyard or garbage.
-func (db *DB) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("CountAliveObjectsInBucket", time.Since(startedAt), success)
- }()
- _, span := tracing.StartSpanFromContext(ctx, "metabase.CountAliveObjectsInBucket")
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return 0, ErrDegradedMode
- }
-
- var prefix byte
- switch prm.ObjectType {
- case objectSDK.TypeRegular:
- prefix = primaryPrefix
- case objectSDK.TypeLock:
- prefix = lockersPrefix
- case objectSDK.TypeTombstone:
- prefix = tombstonePrefix
- default:
- return 0, nil
- }
- bucketName := []byte{prefix}
- bucketName = append(bucketName, prm.ContainerID[:]...)
- var count uint64
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- bkt := tx.Bucket(bucketName)
- if bkt == nil {
- return nil
- }
- graveyardBkt := tx.Bucket(graveyardBucketName)
- garbageBkt := tx.Bucket(garbageBucketName)
- c := bkt.Cursor()
- k, _ := c.First()
- for ; k != nil; k, _ = c.Next() {
- if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
- continue
- }
- count++
- }
- return nil
- })
- success = err == nil
- return count, metaerr.Wrap(err)
-}
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index 02985991c..abb55c9d1 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -3,17 +3,14 @@ package meta_test
import (
"context"
"errors"
- "strconv"
+ "sort"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
@@ -21,8 +18,6 @@ import (
func BenchmarkListWithCursor(b *testing.B) {
db := listWithCursorPrepareDB(b)
- defer func() { require.NoError(b, db.Close(context.Background())) }()
-
b.Run("1 item", func(b *testing.B) {
benchmarkListWithCursor(b, db, 1)
})
@@ -40,7 +35,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
})) // faster single-thread generation
obj := testutil.GenerateObject()
- for i := range 100_000 { // should be a multiple of all batch sizes
+ for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes
obj.SetID(oidtest.ID())
if i%9 == 0 { // let's have 9 objects per container
obj.SetContainerID(cidtest.ID())
@@ -56,10 +51,10 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
b.ResetTimer()
b.ReportAllocs()
- for range b.N {
+ for i := 0; i < b.N; i++ {
res, err := db.ListWithCursor(context.Background(), prm)
if err != nil {
- if !errors.Is(err, meta.ErrEndOfListing) {
+ if err != meta.ErrEndOfListing {
b.Fatalf("error: %v", err)
}
prm.SetCursor(nil)
@@ -74,20 +69,17 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
func TestLisObjectsWithCursor(t *testing.T) {
t.Parallel()
+ db := newDB(t)
+
const (
- currEpoch = 100
- expEpoch = currEpoch - 1
containers = 5
- total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired
+ total = containers * 4 // regular + ts + child + lock
)
- db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- expected := make([]object.Info, 0, total)
+ expected := make([]object.AddressWithType, 0, total)
// fill metabase with objects
- for range containers {
+ for i := 0; i < containers; i++ {
containerID := cidtest.ID()
// add one regular object
@@ -95,21 +87,21 @@ func TestLisObjectsWithCursor(t *testing.T) {
obj.SetType(objectSDK.TypeRegular)
err := putBig(db, obj)
require.NoError(t, err)
- expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
+ expected = append(expected, object.AddressWithType{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
// add one tombstone
obj = testutil.GenerateObjectWithCID(containerID)
obj.SetType(objectSDK.TypeTombstone)
err = putBig(db, obj)
require.NoError(t, err)
- expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeTombstone})
+ expected = append(expected, object.AddressWithType{Address: object.AddressOf(obj), Type: objectSDK.TypeTombstone})
// add one lock
obj = testutil.GenerateObjectWithCID(containerID)
obj.SetType(objectSDK.TypeLock)
err = putBig(db, obj)
require.NoError(t, err)
- expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeLock})
+ expected = append(expected, object.AddressWithType{Address: object.AddressOf(obj), Type: objectSDK.TypeLock})
// add one inhumed (do not include into expected)
obj = testutil.GenerateObjectWithCID(containerID)
@@ -117,7 +109,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
ts := testutil.GenerateObjectWithCID(containerID)
- err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts).Object())
+ err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts))
require.NoError(t, err)
// add one child object (do not include parent into expected)
@@ -131,32 +123,14 @@ func TestLisObjectsWithCursor(t *testing.T) {
child.SetSplitID(splitID)
err = putBig(db, child)
require.NoError(t, err)
- expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular})
-
- // add expired object (do not include into expected)
- obj = testutil.GenerateObjectWithCID(containerID)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
- require.NoError(t, metaPut(db, obj, nil))
-
- // add non-expired object (include into expected)
- obj = testutil.GenerateObjectWithCID(containerID)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch))
- require.NoError(t, metaPut(db, obj, nil))
- expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
-
- // add locked expired object (include into expected)
- obj = testutil.GenerateObjectWithCID(containerID)
- objID := oidtest.ID()
- obj.SetID(objID)
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
- require.NoError(t, metaPut(db, obj, nil))
- require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID}))
- expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
+ expected = append(expected, object.AddressWithType{Address: object.AddressOf(child), Type: objectSDK.TypeRegular})
}
+ expected = sortAddresses(expected)
+
t.Run("success with various count", func(t *testing.T) {
for countPerReq := 1; countPerReq <= total; countPerReq++ {
- got := make([]object.Info, 0, total)
+ got := make([]object.AddressWithType, 0, total)
res, cursor, err := metaListWithCursor(db, uint32(countPerReq), nil)
require.NoError(t, err, "count:%d", countPerReq)
@@ -167,7 +141,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
expectedIterations--
}
- for range expectedIterations {
+ for i := 0; i < expectedIterations; i++ {
res, cursor, err = metaListWithCursor(db, uint32(countPerReq), cursor)
require.NoError(t, err, "count:%d", countPerReq)
got = append(got, res...)
@@ -175,7 +149,9 @@ func TestLisObjectsWithCursor(t *testing.T) {
_, _, err = metaListWithCursor(db, uint32(countPerReq), cursor)
require.ErrorIs(t, err, meta.ErrEndOfListing, "count:%d", countPerReq, cursor)
- require.ElementsMatch(t, expected, got, "count:%d", countPerReq)
+
+ got = sortAddresses(got)
+ require.Equal(t, expected, got, "count:%d", countPerReq)
}
})
@@ -189,14 +165,13 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
const total = 5
expected := make(map[string]int, total)
// fill metabase with objects
- for range total {
+ for i := 0; i < total; i++ {
obj := testutil.GenerateObject()
err := putBig(db, obj)
require.NoError(t, err)
@@ -213,7 +188,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
}
// add new objects
- for range total {
+ for i := 0; i < total; i++ {
obj := testutil.GenerateObject()
err = putBig(db, obj)
require.NoError(t, err)
@@ -236,9 +211,17 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
for _, v := range expected {
require.Equal(t, 1, v)
}
+
}
-func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]object.Info, *meta.Cursor, error) {
+func sortAddresses(addrWithType []object.AddressWithType) []object.AddressWithType {
+ sort.Slice(addrWithType, func(i, j int) bool {
+ return addrWithType[i].Address.EncodeToString() < addrWithType[j].Address.EncodeToString()
+ })
+ return addrWithType
+}
+
+func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]object.AddressWithType, *meta.Cursor, error) {
var listPrm meta.ListPrm
listPrm.SetCount(count)
listPrm.SetCursor(cursor)
@@ -246,59 +229,3 @@ func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]objec
r, err := db.ListWithCursor(context.Background(), listPrm)
return r.AddressList(), r.Cursor(), err
}
-
-func TestIterateOver(t *testing.T) {
- t.Parallel()
-
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- const total uint64 = 5
- for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} {
- var expected []*objectSDK.Object
- // fill metabase with objects
- cid := cidtest.ID()
- for range total {
- obj := testutil.GenerateObjectWithCID(cid)
- obj.SetType(typ)
- err := metaPut(db, obj, nil)
- require.NoError(t, err)
- expected = append(expected, obj)
- }
-
- var metaIter meta.IterateOverObjectsInContainerPrm
- var count uint64
- metaIter.Handler = func(context.Context, *object.Info) error {
- count++
- return nil
- }
- metaIter.ContainerID = cid
- metaIter.ObjectType = typ
- err := db.IterateOverObjectsInContainer(context.Background(), metaIter)
- require.NoError(t, err)
- require.Equal(t, total, count)
-
- var metaCount meta.CountAliveObjectsInContainerPrm
- metaCount.ContainerID = cid
- metaCount.ObjectType = typ
- res, err := db.CountAliveObjectsInContainer(context.Background(), metaCount)
- require.NoError(t, err)
- require.Equal(t, res, total)
-
- err = metaDelete(db, object.AddressOf(expected[0]), object.AddressOf(expected[1]))
- require.NoError(t, err)
-
- res, err = db.CountAliveObjectsInContainer(context.Background(), metaCount)
- require.NoError(t, err)
- require.Equal(t, uint64(3), res)
- }
- var count int
- var metaPrm meta.IterateOverContainersPrm
- metaPrm.Handler = func(context.Context, objectSDK.Type, cidSDK.ID) error {
- count++
- return nil
- }
- err := db.IterateOverContainers(context.Background(), metaPrm)
- require.NoError(t, err)
- require.Equal(t, 3, count)
-}
diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go
index f4cb9e53b..30a31ab87 100644
--- a/pkg/local_object_storage/metabase/lock.go
+++ b/pkg/local_object_storage/metabase/lock.go
@@ -4,10 +4,8 @@ import (
"bytes"
"context"
"fmt"
- "slices"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -64,7 +62,9 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.
return ErrReadOnlyMode
}
- assert.False(len(locked) == 0, "empty locked list")
+ if len(locked) == 0 {
+ panic("empty locked list")
+ }
err := db.lockInternal(locked, cnr, locker)
success = err == nil
@@ -78,7 +78,7 @@ func (db *DB) lockInternal(locked []oid.ID, cnr cid.ID, locker oid.ID) error {
}
key := make([]byte, cidSize)
- return metaerr.Wrap(db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error {
if firstIrregularObjectType(tx, cnr, bucketKeysLocked...) != objectSDK.TypeRegular {
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
}
@@ -143,7 +143,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
var unlockedObjects []oid.Address
- if err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
+ if err := db.boltDB.Update(func(tx *bbolt.Tx) error {
for i := range lockers {
unlocked, err := freePotentialLocks(tx, lockers[i].Container(), lockers[i].Object())
if err != nil {
@@ -162,11 +162,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
// checks if specified object is locked in the specified container.
func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
- return objectLockedWithCache(nil, tx, idCnr, idObj)
-}
-
-func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
- bucketLocked := getLockedBucket(bc, tx)
+ bucketLocked := tx.Bucket(bucketNameLocked)
if bucketLocked != nil {
key := make([]byte, cidSize)
idCnr.Encode(key)
@@ -179,31 +175,6 @@ func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oi
return false
}
-// return `LOCK` id's if specified object is locked in the specified container.
-func getLocks(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) {
- var lockers []oid.ID
- bucketLocked := tx.Bucket(bucketNameLocked)
- if bucketLocked != nil {
- key := make([]byte, cidSize)
- idCnr.Encode(key)
- bucketLockedContainer := bucketLocked.Bucket(key)
- if bucketLockedContainer != nil {
- binObjIDs, err := decodeList(bucketLockedContainer.Get(objectKey(idObj, key)))
- if err != nil {
- return nil, fmt.Errorf("decode list of object lockers: %w", err)
- }
- for _, binObjID := range binObjIDs {
- var id oid.ID
- if err = id.Decode(binObjID); err != nil {
- return nil, err
- }
- lockers = append(lockers, id)
- }
- }
- }
- return lockers, nil
-}
-
// releases all records about the objects locked by the locker.
// Returns slice of unlocked object ID's or an error.
//
@@ -254,7 +225,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres
unlockedObjects = append(unlockedObjects, addr)
} else {
// exclude locker
- keyLockers = slices.Delete(keyLockers, i, i+1)
+ keyLockers = append(keyLockers[:i], keyLockers[i+1:]...)
v, err = encodeList(keyLockers)
if err != nil {
@@ -273,6 +244,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres
return nil
})
+
if err != nil {
return nil, err
}
@@ -354,36 +326,3 @@ func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, e
success = err == nil
return res, err
}
-
-// GetLocks return `LOCK` id's if provided object is locked by any `LOCK`. Not found
-// object is considered as non-locked.
-//
-// Returns only non-logical errors related to underlying database.
-func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("GetLocks", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocks",
- trace.WithAttributes(
- attribute.String("address", addr.EncodeToString()),
- ))
- defer span.End()
-
- db.modeMtx.RLock()
- defer db.modeMtx.RUnlock()
-
- if db.mode.NoMetabase() {
- return res, ErrDegradedMode
- }
- err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
- res, err = getLocks(tx, addr.Container(), addr.Object())
- return nil
- }))
- success = err == nil
- return res, err
-}
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index 341ff9ad1..834ab07a7 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -21,7 +21,6 @@ func TestDB_Lock(t *testing.T) {
cnr := cidtest.ID()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
t.Run("empty locked list", func(t *testing.T) {
require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })
@@ -73,9 +72,7 @@ func TestDB_Lock(t *testing.T) {
_, err := db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
- tombAddr := oidtest.Address()
- tombAddr.SetContainer(objAddr.Container())
- inhumePrm.SetTombstoneAddress(tombAddr)
+ inhumePrm.SetTombstoneAddress(oidtest.Address())
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
@@ -91,9 +88,7 @@ func TestDB_Lock(t *testing.T) {
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
- tombAddr = oidtest.Address()
- tombAddr.SetContainer(objAddr.Container())
- inhumePrm.SetTombstoneAddress(tombAddr)
+ inhumePrm.SetTombstoneAddress(oidtest.Address())
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
})
@@ -107,7 +102,7 @@ func TestDB_Lock(t *testing.T) {
var objLockedErr *apistatus.ObjectLocked
// try to inhume locked object using tombstone
- err := metaInhume(db, objAddr, lockAddr.Object())
+ err := metaInhume(db, objAddr, lockAddr)
require.ErrorAs(t, err, &objLockedErr)
// free locked object
@@ -159,7 +154,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetGCMark()
- for i := range objsNum {
+ for i := 0; i < objsNum; i++ {
inhumePrm.SetAddresses(objectcore.AddressOf(objs[i]))
res, err = db.Inhume(context.Background(), inhumePrm)
@@ -187,7 +182,6 @@ func TestDB_Lock_Expired(t *testing.T) {
es := &epochState{e: 123}
db := newDB(t, meta.WithEpochState(es))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
// put an object
addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124)
@@ -209,7 +203,6 @@ func TestDB_IsLocked(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
// existing and locked objs
@@ -259,7 +252,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*objectSDK
lockedObjs := make([]*objectSDK.Object, 0, numOfLockedObjs)
lockedObjIDs := make([]oid.ID, 0, numOfLockedObjs)
- for range numOfLockedObjs {
+ for i := 0; i < numOfLockedObjs; i++ {
obj := testutil.GenerateObjectWithCID(cnr)
err := putBig(db, obj)
require.NoError(t, err)
diff --git a/pkg/local_object_storage/metabase/metrics.go b/pkg/local_object_storage/metabase/metrics.go
index d673560c7..fc971bd81 100644
--- a/pkg/local_object_storage/metabase/metrics.go
+++ b/pkg/local_object_storage/metabase/metrics.go
@@ -9,7 +9,7 @@ import (
type Metrics interface {
SetParentID(parentID string)
- SetMode(m mode.ComponentMode)
+ SetMode(m mode.Mode)
Close()
AddMethodDuration(method string, d time.Duration, success bool)
@@ -18,6 +18,6 @@ type Metrics interface {
type noopMetrics struct{}
func (m *noopMetrics) SetParentID(string) {}
-func (m *noopMetrics) SetMode(mode.ComponentMode) {}
+func (m *noopMetrics) SetMode(mode.Mode) {}
func (m *noopMetrics) Close() {}
func (m *noopMetrics) AddMethodDuration(string, time.Duration, bool) {}
diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go
index 7edb96384..a18095f3e 100644
--- a/pkg/local_object_storage/metabase/mode.go
+++ b/pkg/local_object_storage/metabase/mode.go
@@ -9,7 +9,7 @@ import (
// SetMode sets the metabase mode of operation.
// If the mode assumes no operation metabase, the database is closed.
-func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
+func (db *DB) SetMode(m mode.Mode) error {
db.modeMtx.Lock()
defer db.modeMtx.Unlock()
@@ -18,24 +18,29 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
}
if !db.mode.NoMetabase() {
- if err := db.Close(ctx); err != nil {
- return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
- if m.NoMetabase() {
+ var err error
+ switch {
+ case m.NoMetabase():
db.boltDB = nil
- } else {
- err := db.openDB(ctx, m)
- if err == nil && !m.ReadOnly() {
- err = db.Init(ctx)
- }
- if err != nil {
- return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
- }
+ case m.ReadOnly():
+ err = db.Open(context.TODO(), true)
+ default:
+ err = db.Open(context.TODO(), false)
+ }
+ if err == nil && !m.NoMetabase() && !m.ReadOnly() {
+ err = db.Init()
+ }
+
+ if err != nil {
+ return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
db.mode = m
- db.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
+ db.metrics.SetMode(m)
return nil
}
diff --git a/pkg/local_object_storage/metabase/mode_test.go b/pkg/local_object_storage/metabase/mode_test.go
deleted file mode 100644
index 28b42283f..000000000
--- a/pkg/local_object_storage/metabase/mode_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package meta
-
-import (
- "context"
- "path/filepath"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "github.com/stretchr/testify/require"
-)
-
-type epochStateTest struct{}
-
-func (s epochStateTest) CurrentEpoch() uint64 {
- return 0
-}
-
-func Test_Mode(t *testing.T) {
- t.Parallel()
- bdb := New([]Option{
- WithPath(filepath.Join(t.TempDir(), "metabase")),
- WithPermissions(0o600),
- WithEpochState(epochStateTest{}),
- }...)
-
- require.NoError(t, bdb.Open(context.Background(), mode.DegradedReadOnly))
- require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Init(context.Background()))
- require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Close(context.Background()))
-
- require.NoError(t, bdb.Open(context.Background(), mode.Degraded))
- require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Init(context.Background()))
- require.Nil(t, bdb.boltDB)
- require.NoError(t, bdb.Close(context.Background()))
-}
diff --git a/pkg/local_object_storage/metabase/movable.go b/pkg/local_object_storage/metabase/movable.go
new file mode 100644
index 000000000..763e49a5d
--- /dev/null
+++ b/pkg/local_object_storage/metabase/movable.go
@@ -0,0 +1,144 @@
+package meta
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// ToMoveItPrm groups the parameters of ToMoveIt operation.
+type ToMoveItPrm struct {
+ addr oid.Address
+}
+
+// ToMoveItRes groups the resulting values of ToMoveIt operation.
+type ToMoveItRes struct{}
+
+// SetAddress sets address of the object to move into another shard.
+func (p *ToMoveItPrm) SetAddress(addr oid.Address) {
+ p.addr = addr
+}
+
+// DoNotMovePrm groups the parameters of DoNotMove operation.
+type DoNotMovePrm struct {
+ addr oid.Address
+}
+
+// DoNotMoveRes groups the resulting values of DoNotMove operation.
+type DoNotMoveRes struct{}
+
+// SetAddress sets address of the object to prevent moving into another shard.
+func (p *DoNotMovePrm) SetAddress(addr oid.Address) {
+ p.addr = addr
+}
+
+// MovablePrm groups the parameters of Movable operation.
+type MovablePrm struct{}
+
+// MovableRes groups the resulting values of Movable operation.
+type MovableRes struct {
+ addrList []oid.Address
+}
+
+// AddressList returns resulting addresses of Movable operation.
+func (p MovableRes) AddressList() []oid.Address {
+ return p.addrList
+}
+
+// ToMoveIt marks objects to move it into another shard. This useful for
+// faster HRW fetching.
+func (db *DB) ToMoveIt(ctx context.Context, prm ToMoveItPrm) (res ToMoveItRes, err error) {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.ToMoveIt",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return res, ErrDegradedMode
+ } else if db.mode.ReadOnly() {
+ return res, ErrReadOnlyMode
+ }
+
+ key := make([]byte, addressKeySize)
+ key = addressKey(prm.addr, key)
+
+ err = db.boltDB.Update(func(tx *bbolt.Tx) error {
+ toMoveIt := tx.Bucket(toMoveItBucketName)
+ return toMoveIt.Put(key, zeroValue)
+ })
+
+ return res, metaerr.Wrap(err)
+}
+
+// DoNotMove removes `MoveIt` mark from the object.
+func (db *DB) DoNotMove(prm DoNotMovePrm) (res DoNotMoveRes, err error) {
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return res, ErrDegradedMode
+ } else if db.mode.ReadOnly() {
+ return res, ErrReadOnlyMode
+ }
+
+ key := make([]byte, addressKeySize)
+ key = addressKey(prm.addr, key)
+
+ err = db.boltDB.Update(func(tx *bbolt.Tx) error {
+ toMoveIt := tx.Bucket(toMoveItBucketName)
+ return toMoveIt.Delete(key)
+ })
+
+ return res, metaerr.Wrap(err)
+}
+
+// Movable returns list of marked objects to move into other shard.
+func (db *DB) Movable(_ MovablePrm) (MovableRes, error) {
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return MovableRes{}, ErrDegradedMode
+ }
+
+ var strAddrs []string
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ toMoveIt := tx.Bucket(toMoveItBucketName)
+ return toMoveIt.ForEach(func(k, v []byte) error {
+ strAddrs = append(strAddrs, string(k))
+
+ return nil
+ })
+ })
+ if err != nil {
+ return MovableRes{}, metaerr.Wrap(err)
+ }
+
+ // we can parse strings to structures in-place, but probably it seems
+ // more efficient to keep bolt db TX code smaller because it might be
+ // bottleneck.
+ addrs := make([]oid.Address, len(strAddrs))
+
+ for i := range strAddrs {
+ err = decodeAddressFromKey(&addrs[i], []byte(strAddrs[i]))
+ if err != nil {
+ return MovableRes{}, metaerr.Wrap(fmt.Errorf("can't parse object address %v: %w",
+ strAddrs[i], err))
+ }
+ }
+
+ return MovableRes{
+ addrList: addrs,
+ }, nil
+}
diff --git a/pkg/local_object_storage/metabase/movable_test.go b/pkg/local_object_storage/metabase/movable_test.go
new file mode 100644
index 000000000..51e7e6d74
--- /dev/null
+++ b/pkg/local_object_storage/metabase/movable_test.go
@@ -0,0 +1,84 @@
+package meta_test
+
+import (
+ "context"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDB_Movable(t *testing.T) {
+ db := newDB(t)
+
+ raw1 := testutil.GenerateObject()
+ raw2 := testutil.GenerateObject()
+
+ // put two objects in metabase
+ err := putBig(db, raw1)
+ require.NoError(t, err)
+
+ err = putBig(db, raw2)
+ require.NoError(t, err)
+
+ // check if toMoveIt index empty
+ toMoveList, err := metaMovable(db)
+ require.NoError(t, err)
+ require.Len(t, toMoveList, 0)
+
+ // mark to move object2
+ err = metaToMoveIt(db, object.AddressOf(raw2))
+ require.NoError(t, err)
+
+ // check if toMoveIt index contains address of object 2
+ toMoveList, err = metaMovable(db)
+ require.NoError(t, err)
+ require.Len(t, toMoveList, 1)
+ require.Contains(t, toMoveList, object.AddressOf(raw2))
+
+ // remove from toMoveIt index non existing address
+ err = metaDoNotMove(db, object.AddressOf(raw1))
+ require.NoError(t, err)
+
+ // check if toMoveIt index hasn't changed
+ toMoveList, err = metaMovable(db)
+ require.NoError(t, err)
+ require.Len(t, toMoveList, 1)
+
+ // remove from toMoveIt index existing address
+ err = metaDoNotMove(db, object.AddressOf(raw2))
+ require.NoError(t, err)
+
+ // check if toMoveIt index is empty now
+ toMoveList, err = metaMovable(db)
+ require.NoError(t, err)
+ require.Len(t, toMoveList, 0)
+}
+
+func metaToMoveIt(db *meta.DB, addr oid.Address) error {
+ var toMovePrm meta.ToMoveItPrm
+ toMovePrm.SetAddress(addr)
+
+ _, err := db.ToMoveIt(context.Background(), toMovePrm)
+ return err
+}
+
+func metaMovable(db *meta.DB) ([]oid.Address, error) {
+ r, err := db.Movable(meta.MovablePrm{})
+ if err != nil {
+ return nil, err
+ }
+
+ return r.AddressList(), nil
+}
+
+func metaDoNotMove(db *meta.DB, addr oid.Address) error {
+ var doNotMovePrm meta.DoNotMovePrm
+ doNotMovePrm.SetAddress(addr)
+
+ _, err := db.DoNotMove(doNotMovePrm)
+ return err
+}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 5e1bbfe9e..28f12851f 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
gio "io"
- "strconv"
"time"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -14,7 +13,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -35,14 +33,10 @@ type PutPrm struct {
obj *objectSDK.Object
id []byte
-
- indexAttributes bool
}
// PutRes groups the resulting values of Put operation.
-type PutRes struct {
- Inserted bool
-}
+type PutRes struct{}
// SetObject is a Put option to set object to save.
func (p *PutPrm) SetObject(obj *objectSDK.Object) {
@@ -54,13 +48,10 @@ func (p *PutPrm) SetStorageID(id []byte) {
p.id = id
}
-func (p *PutPrm) SetIndexAttributes(v bool) {
- p.indexAttributes = v
-}
-
var (
- ErrUnknownObjectType = errors.New("unknown object type")
- ErrIncorrectRootObject = errors.New("invalid root object")
+ ErrUnknownObjectType = errors.New("unknown object type")
+ ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it")
+ ErrIncorrectRootObject = errors.New("invalid root object")
)
// Put saves object header in metabase. Object payload expected to be cut.
@@ -94,13 +85,11 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
currEpoch := db.epochState.CurrentEpoch()
err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
- var e error
- res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch, prm.indexAttributes)
- return e
+ return db.put(tx, prm.obj, prm.id, nil, currEpoch)
})
if err == nil {
success = true
- storagelog.Write(ctx, db.log,
+ storagelog.Write(db.log,
storagelog.AddressField(objectCore.AddressOf(prm.obj)),
storagelog.OpField("metabase PUT"))
}
@@ -112,36 +101,28 @@ func (db *DB) put(tx *bbolt.Tx,
obj *objectSDK.Object,
id []byte,
si *objectSDK.SplitInfo,
- currEpoch uint64,
- indexAttributes bool,
-) (PutRes, error) {
+ currEpoch uint64) error {
cnr, ok := obj.ContainerID()
if !ok {
- return PutRes{}, errors.New("missing container in object")
- }
-
- var ecParentAddress oid.Address
- if ecHeader := obj.ECHeader(); ecHeader != nil {
- ecParentAddress.SetContainer(cnr)
- ecParentAddress.SetObject(ecHeader.Parent())
+ return errors.New("missing container in object")
}
isParent := si != nil
- exists, _, err := db.exists(tx, objectCore.AddressOf(obj), ecParentAddress, currEpoch)
+ exists, err := db.exists(tx, objectCore.AddressOf(obj), currEpoch)
var splitInfoError *objectSDK.SplitInfoError
if errors.As(err, &splitInfoError) {
exists = true // object exists, however it is virtual
} else if err != nil {
- return PutRes{}, err // return any error besides SplitInfoError
+ return err // return any error besides SplitInfoError
}
if exists {
- return PutRes{}, db.updateObj(tx, obj, id, si, isParent)
+ return db.updateObj(tx, obj, id, si, isParent)
}
- return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch, indexAttributes)
+ return db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch)
}
func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool) error {
@@ -151,7 +132,7 @@ func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *obje
// When storage engine moves objects between different sub-storages,
// it calls metabase.Put method with new storage ID, thus triggering this code.
if !isParent && id != nil {
- return setStorageID(tx, objectCore.AddressOf(obj), id, true)
+ return updateStorageID(tx, objectCore.AddressOf(obj), id)
}
// when storage already has last object in split hierarchy and there is
@@ -164,14 +145,14 @@ func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *obje
return nil
}
-func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64, indexAttributes bool) error {
+func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64) error {
if par := obj.Parent(); par != nil && !isParent { // limit depth by two
parentSI, err := splitInfoFromObject(obj)
if err != nil {
return err
}
- _, err = db.put(tx, par, id, parentSI, currEpoch, indexAttributes)
+ err = db.put(tx, par, id, parentSI, currEpoch)
if err != nil {
return err
}
@@ -179,19 +160,17 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
err := putUniqueIndexes(tx, obj, si, id)
if err != nil {
- return fmt.Errorf("put unique indexes: %w", err)
+ return fmt.Errorf("can't put unique indexes: %w", err)
}
err = updateListIndexes(tx, obj, putListIndexItem)
if err != nil {
- return fmt.Errorf("put list indexes: %w", err)
+ return fmt.Errorf("can't put list indexes: %w", err)
}
- if indexAttributes {
- err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
- if err != nil {
- return fmt.Errorf("put fake bucket tree indexes: %w", err)
- }
+ err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
+ if err != nil {
+ return fmt.Errorf("can't put fake bucket tree indexes: %w", err)
}
// update container volume size estimation
@@ -203,129 +182,99 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
}
if !isParent {
- if err = db.incCounters(tx, cnr, IsUserObject(obj)); err != nil {
- return err
+ err = db.updateCounter(tx, phy, 1, true)
+ if err != nil {
+ return fmt.Errorf("could not increase phy object counter: %w", err)
+ }
+
+ // it is expected that putting an unavailable object is
+ // impossible and should be handled on the higher levels
+ err = db.updateCounter(tx, logical, 1, true)
+ if err != nil {
+ return fmt.Errorf("could not increase logical object counter: %w", err)
}
}
return nil
}
-func putUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, si *objectSDK.SplitInfo, id []byte) error {
+func putUniqueIndexes(
+ tx *bbolt.Tx,
+ obj *objectSDK.Object,
+ si *objectSDK.SplitInfo,
+ id []byte,
+) error {
isParent := si != nil
addr := objectCore.AddressOf(obj)
+ cnr := addr.Container()
objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
bucketName := make([]byte, bucketKeySize)
+ // add value to primary unique bucket
if !isParent {
- err := putRawObjectData(tx, obj, bucketName, addr, objKey)
+ switch obj.Type() {
+ case objectSDK.TypeRegular:
+ bucketName = primaryBucketName(cnr, bucketName)
+ case objectSDK.TypeTombstone:
+ bucketName = tombstoneBucketName(cnr, bucketName)
+ case objectSDK.TypeLock:
+ bucketName = bucketNameLockers(cnr, bucketName)
+ default:
+ return ErrUnknownObjectType
+ }
+
+ rawObject, err := obj.CutPayload().Marshal()
+ if err != nil {
+ return fmt.Errorf("can't marshal object header: %w", err)
+ }
+
+ err = putUniqueIndexItem(tx, namedBucketItem{
+ name: bucketName,
+ key: objKey,
+ val: rawObject,
+ })
if err != nil {
return err
}
+
+ // index storageID if it is present
if id != nil {
- if err = setStorageID(tx, objectCore.AddressOf(obj), id, false); err != nil {
+ err = putUniqueIndexItem(tx, namedBucketItem{
+ name: smallBucketName(cnr, bucketName),
+ key: objKey,
+ val: id,
+ })
+ if err != nil {
return err
}
}
}
- if err := putExpirationEpoch(tx, obj, addr, objKey); err != nil {
- return err
- }
-
- return putSplitInfo(tx, obj, bucketName, addr, si, objKey)
-}
-
-func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, addr oid.Address, objKey []byte) error {
- switch obj.Type() {
- case objectSDK.TypeRegular:
- bucketName = primaryBucketName(addr.Container(), bucketName)
- case objectSDK.TypeTombstone:
- bucketName = tombstoneBucketName(addr.Container(), bucketName)
- case objectSDK.TypeLock:
- bucketName = bucketNameLockers(addr.Container(), bucketName)
- default:
- return ErrUnknownObjectType
- }
- rawObject, err := obj.CutPayload().Marshal()
- if err != nil {
- return fmt.Errorf("marshal object header: %w", err)
- }
- return putUniqueIndexItem(tx, namedBucketItem{
- name: bucketName,
- key: objKey,
- val: rawObject,
- })
-}
-
-func putExpirationEpoch(tx *bbolt.Tx, obj *objectSDK.Object, addr oid.Address, objKey []byte) error {
- if expEpoch, ok := hasExpirationEpoch(obj); ok {
- err := putUniqueIndexItem(tx, namedBucketItem{
- name: expEpochToObjectBucketName,
- key: expirationEpochKey(expEpoch, addr.Container(), addr.Object()),
- val: zeroValue,
- })
- if err != nil {
- return err
- }
- val := make([]byte, epochSize)
- binary.LittleEndian.PutUint64(val, expEpoch)
- err = putUniqueIndexItem(tx, namedBucketItem{
- name: objectToExpirationEpochBucketName(addr.Container(), make([]byte, bucketKeySize)),
- key: objKey,
- val: val,
- })
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func putSplitInfo(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, addr oid.Address, si *objectSDK.SplitInfo, objKey []byte) error {
+ // index root object
if obj.Type() == objectSDK.TypeRegular && !obj.HasParent() {
- if ecHead := obj.ECHeader(); ecHead != nil {
- parentID := ecHead.Parent()
- if ecHead.ParentSplitID() != nil {
- parentSplitParentID := ecHead.ParentSplitParentID()
- if parentSplitParentID == nil {
- return nil
- }
+ var (
+ err error
+ splitInfo []byte
+ )
- si = objectSDK.NewSplitInfo()
- si.SetSplitID(ecHead.ParentSplitID())
- si.SetLastPart(ecHead.Parent())
-
- parentID = *parentSplitParentID
+ if isParent {
+ splitInfo, err = si.Marshal()
+ if err != nil {
+ return fmt.Errorf("can't marshal split info: %w", err)
}
- objKey = objectKey(parentID, objKey)
}
- return updateSplitInfoIndex(tx, objKey, addr.Container(), bucketName, si)
+
+ err = putUniqueIndexItem(tx, namedBucketItem{
+ name: rootBucketName(cnr, bucketName),
+ key: objKey,
+ val: splitInfo,
+ })
+ if err != nil {
+ return err
+ }
}
- return nil
-}
-func updateSplitInfoIndex(tx *bbolt.Tx, objKey []byte, cnr cid.ID, bucketName []byte, si *objectSDK.SplitInfo) error {
- return updateUniqueIndexItem(tx, namedBucketItem{
- name: rootBucketName(cnr, bucketName),
- key: objKey,
- }, func(old, _ []byte) ([]byte, error) {
- switch {
- case si == nil && old == nil:
- return []byte{}, nil
- case si == nil:
- return old, nil
- case old == nil:
- return si.Marshal()
- default:
- oldSI := objectSDK.NewSplitInfo()
- if err := oldSI.Unmarshal(old); err != nil {
- return nil, err
- }
- si = util.MergeSplitInfo(si, oldSI)
- return si.Marshal()
- }
- })
+ return nil
}
type updateIndexItemFunc = func(tx *bbolt.Tx, item namedBucketItem) error
@@ -336,6 +285,18 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
objKey := objectKey(idObj, make([]byte, objectKeySize))
bucketName := make([]byte, bucketKeySize)
+ cs, _ := obj.PayloadChecksum()
+
+ // index payload hashes
+ err := f(tx, namedBucketItem{
+ name: payloadHashBucketName(cnr, bucketName),
+ key: cs.Value(),
+ val: objKey,
+ })
+ if err != nil {
+ return err
+ }
+
idParent, ok := obj.ParentID()
// index parent ids
@@ -362,74 +323,28 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
}
}
- if ech := obj.ECHeader(); ech != nil {
- err := f(tx, namedBucketItem{
- name: ecInfoBucketName(cnr, bucketName),
- key: objectKey(ech.Parent(), make([]byte, objectKeySize)),
- val: objKey,
- })
- if err != nil {
- return err
- }
-
- if ech.ParentSplitID() != nil {
- objKey := objectKey(ech.Parent(), make([]byte, objectKeySize))
- err := f(tx, namedBucketItem{
- name: splitBucketName(cnr, bucketName),
- key: ech.ParentSplitID().ToV2(),
- val: objKey,
- })
- if err != nil {
- return err
- }
- }
-
- if parentSplitParentID := ech.ParentSplitParentID(); parentSplitParentID != nil {
- objKey := objectKey(ech.Parent(), make([]byte, objectKeySize))
- err := f(tx, namedBucketItem{
- name: parentBucketName(cnr, bucketName),
- key: objectKey(*parentSplitParentID, make([]byte, objectKeySize)),
- val: objKey,
- })
- if err != nil {
- return err
- }
- }
- }
-
return nil
}
-var indexedAttributes = map[string]struct{}{
- "S3-Access-Box-CRDT-Name": {},
- objectSDK.AttributeFilePath: {},
-}
-
-// IsAtrributeIndexed returns True if attribute is indexed by metabase.
-func IsAtrributeIndexed(attr string) bool {
- _, found := indexedAttributes[attr]
- return found
-}
-
func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error {
id, _ := obj.ID()
cnr, _ := obj.ContainerID()
objKey := objectKey(id, make([]byte, objectKeySize))
+ attrs := obj.Attributes()
+
key := make([]byte, bucketKeySize)
- var attrs []objectSDK.Attribute
- if obj.ECHeader() != nil {
- attrs = obj.ECHeader().ParentAttributes()
- objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize))
- } else {
- attrs = obj.Attributes()
+ err := f(tx, namedBucketItem{
+ name: ownerBucketName(cnr, key),
+ key: []byte(obj.OwnerID().EncodeToString()),
+ val: objKey,
+ })
+ if err != nil {
+ return err
}
// user specified attributes
for i := range attrs {
- if !IsAtrributeIndexed(attrs[i].Key()) {
- continue
- }
key = attributeBucketName(cnr, attrs[i].Key(), key)
err := f(tx, namedBucketItem{
name: key,
@@ -444,80 +359,45 @@ func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
return nil
}
-func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) {
- attributes := obj.Attributes()
- if ech := obj.ECHeader(); ech != nil {
- attributes = ech.ParentAttributes()
- }
- for _, attr := range attributes {
- if attr.Key() == objectV2.SysAttributeExpEpoch {
- expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
- return expEpoch, err == nil
- }
- }
- return 0, false
-}
-
-type bucketContainer interface {
- Bucket([]byte) *bbolt.Bucket
- CreateBucket([]byte) (*bbolt.Bucket, error)
- CreateBucketIfNotExists([]byte) (*bbolt.Bucket, error)
-}
-
-func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Bucket, error) {
- if bkt := tx.Bucket(name); bkt != nil {
- return bkt, nil
- }
- return tx.CreateBucket(name)
-}
-
-func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error {
- bkt, err := createBucketLikelyExists(tx, item.name)
- if err != nil {
- return fmt.Errorf("create index %v: %w", item.name, err)
- }
-
- data, err := update(bkt.Get(item.key), item.val)
- if err != nil {
- return err
- }
- return bkt.Put(item.key, data)
-}
-
func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- return updateUniqueIndexItem(tx, item, func(_, val []byte) ([]byte, error) { return val, nil })
+ bkt, err := tx.CreateBucketIfNotExists(item.name)
+ if err != nil {
+ return fmt.Errorf("can't create index %v: %w", item.name, err)
+ }
+
+ return bkt.Put(item.key, item.val)
}
func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt, err := createBucketLikelyExists(tx, item.name)
+ bkt, err := tx.CreateBucketIfNotExists(item.name)
if err != nil {
- return fmt.Errorf("create index %v: %w", item.name, err)
+ return fmt.Errorf("can't create index %v: %w", item.name, err)
}
- fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
+ fkbtRoot, err := bkt.CreateBucketIfNotExists(item.key)
if err != nil {
- return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err)
+ return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err)
}
return fkbtRoot.Put(item.val, zeroValue)
}
func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt, err := createBucketLikelyExists(tx, item.name)
+ bkt, err := tx.CreateBucketIfNotExists(item.name)
if err != nil {
- return fmt.Errorf("create index %v: %w", item.name, err)
+ return fmt.Errorf("can't create index %v: %w", item.name, err)
}
lst, err := decodeList(bkt.Get(item.key))
if err != nil {
- return fmt.Errorf("decode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("can't decode leaf list %v: %w", item.key, err)
}
lst = append(lst, item.val)
encodedLst, err := encodeList(lst)
if err != nil {
- return fmt.Errorf("encode leaf list %v: %w", item.key, err)
+ return fmt.Errorf("can't encode leaf list %v: %w", item.key, err)
}
return bkt.Put(item.key, encodedLst)
@@ -593,26 +473,51 @@ func getVarUint(data []byte) (uint64, int, error) {
}
}
-// setStorageID for existing objects if they were moved from one
+// updateStorageID for existing objects if they were moved from one
// storage location to another.
-func setStorageID(tx *bbolt.Tx, addr oid.Address, id []byte, override bool) error {
+func updateStorageID(tx *bbolt.Tx, addr oid.Address, id []byte) error {
key := make([]byte, bucketKeySize)
- bkt, err := createBucketLikelyExists(tx, smallBucketName(addr.Container(), key))
+ bkt, err := tx.CreateBucketIfNotExists(smallBucketName(addr.Container(), key))
if err != nil {
return err
}
- key = objectKey(addr.Object(), key)
- if override || bkt.Get(key) == nil {
- return bkt.Put(key, id)
- }
- return nil
+
+ return bkt.Put(objectKey(addr.Object(), key), id)
}
// updateSpliInfo for existing objects if storage filled with extra information
// about last object in split hierarchy or linking object.
func updateSplitInfo(tx *bbolt.Tx, addr oid.Address, from *objectSDK.SplitInfo) error {
- objKey := objectKey(addr.Object(), make([]byte, bucketKeySize))
- return updateSplitInfoIndex(tx, objKey, addr.Container(), make([]byte, bucketKeySize), from)
+ key := make([]byte, bucketKeySize)
+ bkt := tx.Bucket(rootBucketName(addr.Container(), key))
+ if bkt == nil {
+	// if object doesn't exist and we want to update split info on it
+ // then ignore, this should never happen
+ return ErrIncorrectSplitInfoUpdate
+ }
+
+ objectKey := objectKey(addr.Object(), key)
+
+ rawSplitInfo := bkt.Get(objectKey)
+ if len(rawSplitInfo) == 0 {
+ return ErrIncorrectSplitInfoUpdate
+ }
+
+ to := objectSDK.NewSplitInfo()
+
+ err := to.Unmarshal(rawSplitInfo)
+ if err != nil {
+ return fmt.Errorf("can't unmarshal split info from root index: %w", err)
+ }
+
+ result := util.MergeSplitInfo(from, to)
+
+ rawSplitInfo, err = result.Marshal()
+ if err != nil {
+		return fmt.Errorf("can't marshal merged split info: %w", err)
+ }
+
+ return bkt.Put(objectKey, rawSplitInfo)
}
// splitInfoFromObject returns split info based on last or linkin object.
diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go
index f37ed4cf2..a4cb2edc1 100644
--- a/pkg/local_object_storage/metabase/put_test.go
+++ b/pkg/local_object_storage/metabase/put_test.go
@@ -18,7 +18,7 @@ import (
"github.com/stretchr/testify/require"
)
-func prepareObjects(n int) []*objectSDK.Object {
+func prepareObjects(t testing.TB, n int) []*objectSDK.Object {
cnr := cidtest.ID()
parentID := objecttest.ID()
objs := make([]*objectSDK.Object, n)
@@ -46,14 +46,13 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(runtime.NumCPU()))
- defer func() { require.NoError(b, db.Close(context.Background())) }()
// Ensure the benchmark is bound by CPU and not waiting batch-delay time.
b.SetParallelism(1)
var index atomic.Int64
index.Store(-1)
- objs := prepareObjects(b.N)
+ objs := prepareObjects(b, b.N)
b.ResetTimer()
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
@@ -68,13 +67,12 @@ func BenchmarkPut(b *testing.B) {
db := newDB(b,
meta.WithMaxBatchDelay(time.Millisecond*10),
meta.WithMaxBatchSize(1))
- defer func() { require.NoError(b, db.Close(context.Background())) }()
var index atomic.Int64
index.Store(-1)
- objs := prepareObjects(b.N)
+ objs := prepareObjects(b, b.N)
b.ResetTimer()
b.ReportAllocs()
- for range b.N {
+ for i := 0; i < b.N; i++ {
if err := metaPut(db, objs[index.Add(1)], nil); err != nil {
b.Fatal(err)
}
@@ -84,7 +82,6 @@ func BenchmarkPut(b *testing.B) {
func TestDB_PutBlobovniczaUpdate(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
raw1 := testutil.GenerateObject()
storageID := []byte{1, 2, 3, 4}
diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go
deleted file mode 100644
index 5f0956f0b..000000000
--- a/pkg/local_object_storage/metabase/reset_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package meta
-
-import (
- "context"
- "fmt"
- "path/filepath"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "github.com/stretchr/testify/require"
- "go.etcd.io/bbolt"
-)
-
-type epochState struct{ e uint64 }
-
-func (s epochState) CurrentEpoch() uint64 {
- return s.e
-}
-
-func TestResetDropsContainerBuckets(t *testing.T) {
- t.Parallel()
-
- db := New(
- []Option{
- WithPath(filepath.Join(t.TempDir(), "metabase")),
- WithPermissions(0o600),
- WithEpochState(epochState{}),
- }...,
- )
-
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
-
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- for idx := range 100 {
- var putPrm PutPrm
- putPrm.SetObject(testutil.GenerateObject())
- putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx))
- _, err := db.Put(context.Background(), putPrm)
- require.NoError(t, err)
- }
-
- require.NoError(t, db.Reset())
-
- var bucketCount int
- require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
- return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
- _, exists := mStaticBuckets[string(name)]
- require.True(t, exists, "unexpected bucket:"+string(name))
- bucketCount++
- return nil
- })
- }))
- require.Equal(t, len(mStaticBuckets), bucketCount)
-}
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 60da50671..8b086a89f 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -8,15 +8,17 @@ import (
"strings"
"time"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
)
type (
@@ -35,9 +37,8 @@ type (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
- useAttributeIndex bool
+ cnr cid.ID
+ filters objectSDK.SearchFilters
}
// SelectRes groups the resulting values of Select operation.
@@ -55,10 +56,6 @@ func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) {
p.filters = fs
}
-func (p *SelectPrm) SetUseAttributeIndex(v bool) {
- p.useAttributeIndex = v
-}
-
// AddressList returns list of addresses of the selected objects.
func (r SelectRes) AddressList() []oid.Address {
return r.addrList
@@ -95,14 +92,14 @@ func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err err
currEpoch := db.epochState.CurrentEpoch()
return res, metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
- res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch, prm.useAttributeIndex)
+ res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch)
success = err == nil
return err
}))
}
-func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64, useAttributeIndex bool) ([]oid.Address, error) {
- group, err := groupFilters(fs, useAttributeIndex)
+func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64) ([]oid.Address, error) {
+ group, err := groupFilters(fs)
if err != nil {
return nil, err
}
@@ -131,7 +128,6 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
res := make([]oid.Address, 0, len(mAddr))
- bc := newBucketCache()
for a, ind := range mAddr {
if ind != expLen {
continue // ignore objects with unmatched fast filters
@@ -146,16 +142,12 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
var addr oid.Address
addr.SetContainer(cnr)
addr.SetObject(id)
- st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
- if err != nil {
- return nil, err
- }
- if st > 0 {
+
+ if objectStatus(tx, addr, currEpoch) > 0 {
continue // ignore removed objects
}
- addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch)
- if !match {
+ if !db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch) {
continue // ignore objects with unmatched slow filters
}
@@ -182,7 +174,7 @@ func selectAllFromBucket(tx *bbolt.Tx, name []byte, to map[string]int, fNum int)
return
}
- _ = bkt.ForEach(func(k, _ []byte) error {
+ _ = bkt.ForEach(func(k, v []byte) error {
markAddressInCache(to, fNum, string(k))
return nil
@@ -203,6 +195,12 @@ func (db *DB) selectFastFilter(
switch f.Header() {
case v2object.FilterHeaderObjectID:
db.selectObjectID(tx, f, cnr, to, fNum, currEpoch)
+ case v2object.FilterHeaderOwnerID:
+ bucketName := ownerBucketName(cnr, bucketName)
+ db.selectFromFKBT(tx, bucketName, f, to, fNum)
+ case v2object.FilterHeaderPayloadHash:
+ bucketName := payloadHashBucketName(cnr, bucketName)
+ db.selectFromList(tx, bucketName, f, to, fNum)
case v2object.FilterHeaderObjectType:
for _, bucketName := range bucketNamesForType(cnr, f.Operation(), f.Value()) {
selectAllFromBucket(tx, bucketName, to, fNum)
@@ -213,9 +211,6 @@ func (db *DB) selectFastFilter(
case v2object.FilterHeaderSplitID:
bucketName := splitBucketName(cnr, bucketName)
db.selectFromList(tx, bucketName, f, to, fNum)
- case v2object.FilterHeaderECParent:
- bucketName := ecInfoBucketName(cnr, bucketName)
- db.selectFromList(tx, bucketName, f, to, fNum)
case v2object.FilterPropertyRoot:
selectAllFromBucket(tx, rootBucketName(cnr, bucketName), to, fNum)
case v2object.FilterPropertyPhy:
@@ -224,6 +219,7 @@ func (db *DB) selectFastFilter(
selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum)
default: // user attribute
bucketName := attributeBucketName(cnr, f.Header(), bucketName)
+
if f.Operation() == objectSDK.MatchNotPresent {
selectOutsideFKBT(tx, allBucketNames(cnr), bucketName, to, fNum)
} else {
@@ -244,6 +240,7 @@ func allBucketNames(cnr cid.ID) (names [][]byte) {
names = append(names, fn(cnr, make([]byte, bucketKeySize)))
}
}
+
return
}
@@ -278,6 +275,8 @@ func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal str
return
}
+// selectFromFKBT looks into index to find list of addresses to add in
+// resulting cache.
func (db *DB) selectFromFKBT(
tx *bbolt.Tx,
name []byte, // fkbt root bucket name
@@ -287,6 +286,8 @@ func (db *DB) selectFromFKBT(
) { //
matchFunc, ok := db.matchers[f.Operation()]
if !ok {
+ db.log.Debug(logs.MetabaseMissingMatcher, zap.Uint32("operation", uint32(f.Operation())))
+
return
}
@@ -295,7 +296,7 @@ func (db *DB) selectFromFKBT(
return
}
- _ = matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
+ err := matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
fkbtLeaf := fkbtRoot.Bucket(k)
if fkbtLeaf == nil {
return nil
@@ -307,6 +308,9 @@ func (db *DB) selectFromFKBT(
return nil
})
})
+ if err != nil {
+ db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error()))
+ }
}
// selectOutsideFKBT looks into all incl buckets to find list of addresses outside to add in
@@ -371,17 +375,24 @@ func (db *DB) selectFromList(
case objectSDK.MatchStringEqual:
lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value())))
if err != nil {
+ db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error()))
return
}
default:
fMatch, ok := db.matchers[op]
if !ok {
+ db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op)))
+
return
}
- if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(_, val []byte) error {
+ if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(key, val []byte) error {
l, err := decodeList(val)
if err != nil {
+ db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf,
+ zap.String("error", err.Error()),
+ )
+
return err
}
@@ -389,6 +400,10 @@ func (db *DB) selectFromList(
return nil
}); err != nil {
+ db.log.Debug(logs.MetabaseCantIterateOverTheBucket,
+ zap.String("error", err.Error()),
+ )
+
return
}
}
@@ -413,7 +428,7 @@ func (db *DB) selectObjectID(
addr.SetObject(id)
var splitInfoError *objectSDK.SplitInfoError
- ok, _, err := db.exists(tx, addr, oid.Address{}, currEpoch)
+ ok, err := db.exists(tx, addr, currEpoch)
if (err == nil && ok) || errors.As(err, &splitInfoError) {
raw := make([]byte, objectKeySize)
id.Encode(raw)
@@ -430,6 +445,10 @@ func (db *DB) selectObjectID(
default:
fMatch, ok := db.matchers[op]
if !ok {
+ db.log.Debug(logs.MetabaseUnknownOperation,
+ zap.Uint32("operation", uint32(f.Operation())),
+ )
+
return
}
@@ -440,122 +459,70 @@ func (db *DB) selectObjectID(
return
}
- _ = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error {
+ err := fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, v []byte) error {
var id oid.ID
if err := id.Decode(k); err == nil {
appendOID(id)
}
return nil
})
+ if err != nil {
+ db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets,
+ zap.String("error", err.Error()),
+ )
+ }
}
}
}
// matchSlowFilters return true if object header is matched by all slow filters.
-func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
- result := addr
+func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) bool {
if len(f) == 0 {
- return result, true
+ return true
}
- obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch)
+ buf := make([]byte, addressKeySize)
+ obj, err := db.get(tx, addr, buf, true, false, currEpoch)
if err != nil {
- return result, false
+ return false
}
for i := range f {
+ matchFunc, ok := db.matchers[f[i].Operation()]
+ if !ok {
+ return false
+ }
+
var data []byte
+
switch f[i].Header() {
case v2object.FilterHeaderVersion:
data = []byte(obj.Version().String())
case v2object.FilterHeaderHomomorphicHash:
- if isECChunk {
- return result, false // EC chunk and EC parent hashes are incomparable
- }
cs, _ := obj.PayloadHomomorphicHash()
data = cs.Value()
case v2object.FilterHeaderCreationEpoch:
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.CreationEpoch())
case v2object.FilterHeaderPayloadLength:
- if isECChunk {
- return result, false // EC chunk and EC parent payload lengths are incomparable
- }
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.PayloadSize())
- case v2object.FilterHeaderOwnerID:
- data = []byte(obj.OwnerID().EncodeToString())
- case v2object.FilterHeaderPayloadHash:
- if isECChunk {
- return result, false // EC chunk and EC parent payload hashes are incomparable
- }
- cs, _ := obj.PayloadChecksum()
- data = cs.Value()
- default: // user attribute
- v, ok := attributeValue(obj, f[i].Header())
- if ok {
- if ech := obj.ECHeader(); ech != nil {
- result.SetObject(ech.Parent())
- }
- data = []byte(v)
- } else {
- return result, f[i].Operation() == objectSDK.MatchNotPresent
- }
- }
-
- matchFunc, ok := db.matchers[f[i].Operation()]
- if !ok {
- return result, false
+ default:
+ continue // ignore unknown search attributes
}
if !matchFunc.matchSlow(f[i].Header(), data, f[i].Value()) {
- return result, false
+ return false
}
}
- return result, true
-}
-
-func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
- buf := make([]byte, addressKeySize)
- obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch)
- if err != nil {
- var ecInfoError *objectSDK.ECInfoError
- if errors.As(err, &ecInfoError) {
- for _, chunk := range ecInfoError.ECInfo().Chunks {
- var objID oid.ID
- if err = objID.ReadFromV2(chunk.ID); err != nil {
- continue
- }
- addr.SetObject(objID)
- obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch)
- if err == nil {
- return obj, true, nil
- }
- }
- }
- return nil, false, err
- }
- return obj, false, nil
-}
-
-func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
- objectAttributes := obj.Attributes()
- if ech := obj.ECHeader(); ech != nil {
- objectAttributes = ech.ParentAttributes()
- }
- for _, attr := range objectAttributes {
- if attr.Key() == attribute {
- return attr.Value(), true
- }
- }
- return "", false
+ return true
}
// groupFilters divides filters in two groups: fast and slow. Fast filters
// processed by indexes and slow filters processed after by unmarshaling
// object headers.
-func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filterGroup, error) {
+func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
res := filterGroup{
fastFilters: make(objectSDK.SearchFilters, 0, len(filters)),
slowFilters: make(objectSDK.SearchFilters, 0, len(filters)),
@@ -566,25 +533,18 @@ func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filt
case v2object.FilterHeaderContainerID: // support deprecated field
err := res.cnr.DecodeString(filters[i].Value())
if err != nil {
- return filterGroup{}, fmt.Errorf("parse container id: %w", err)
+ return filterGroup{}, fmt.Errorf("can't parse container id: %w", err)
}
res.withCnrFilter = true
- case // fast filters
- v2object.FilterHeaderObjectID,
- v2object.FilterHeaderObjectType,
- v2object.FilterHeaderParent,
- v2object.FilterHeaderSplitID,
- v2object.FilterHeaderECParent,
- v2object.FilterPropertyRoot,
- v2object.FilterPropertyPhy:
+ case // slow filters
+ v2object.FilterHeaderVersion,
+ v2object.FilterHeaderCreationEpoch,
+ v2object.FilterHeaderPayloadLength,
+ v2object.FilterHeaderHomomorphicHash:
+ res.slowFilters = append(res.slowFilters, filters[i])
+ default: // fast filters or user attributes if unknown
res.fastFilters = append(res.fastFilters, filters[i])
- default:
- if useAttributeIndex && IsAtrributeIndexed(filters[i].Header()) {
- res.fastFilters = append(res.fastFilters, filters[i])
- } else {
- res.slowFilters = append(res.slowFilters, filters[i])
- }
}
}
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index ce2156d2e..e107085ab 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -3,42 +3,26 @@ package meta_test
import (
"context"
"encoding/hex"
- "math/rand"
"strconv"
"testing"
+ v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestDB_SelectUserAttributes(t *testing.T) {
t.Parallel()
- t.Run("with_index", func(t *testing.T) {
- testSelectUserAttributes(t, true)
- })
- t.Run("without_index", func(t *testing.T) {
- testSelectUserAttributes(t, false)
- })
-}
-
-func testSelectUserAttributes(t *testing.T, index bool) {
- t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -46,161 +30,123 @@ func testSelectUserAttributes(t *testing.T, index bool) {
testutil.AddAttribute(raw1, "foo", "bar")
testutil.AddAttribute(raw1, "x", "y")
- var putPrm meta.PutPrm
- putPrm.SetIndexAttributes(index)
- putPrm.SetObject(raw1)
- _, err := db.Put(context.Background(), putPrm)
+ err := putBig(db, raw1)
require.NoError(t, err)
raw2 := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(raw2, "foo", "bar")
testutil.AddAttribute(raw2, "x", "z")
- putPrm.SetObject(raw2)
- _, err = db.Put(context.Background(), putPrm)
+ err = putBig(db, raw2)
require.NoError(t, err)
raw3 := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(raw3, "a", "b")
- putPrm.SetObject(raw3)
- _, err = db.Put(context.Background(), putPrm)
+ err = putBig(db, raw3)
require.NoError(t, err)
raw4 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw4, objectSDK.AttributeFilePath, "/test/1/2")
+ testutil.AddAttribute(raw4, "path", "test/1/2")
- putPrm.SetObject(raw4)
- _, err = db.Put(context.Background(), putPrm)
+ err = putBig(db, raw4)
require.NoError(t, err)
raw5 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw5, objectSDK.AttributeFilePath, "/test/1/3")
+ testutil.AddAttribute(raw5, "path", "test/1/3")
- putPrm.SetObject(raw5)
- _, err = db.Put(context.Background(), putPrm)
+ err = putBig(db, raw5)
require.NoError(t, err)
raw6 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw6, objectSDK.AttributeFilePath, "/test/2/3")
+ testutil.AddAttribute(raw6, "path", "test/2/3")
- putPrm.SetObject(raw6)
- _, err = db.Put(context.Background(), putPrm)
+ err = putBig(db, raw6)
require.NoError(t, err)
- raw7 := testutil.GenerateObjectWithCID(cnr)
- var attr objectSDK.Attribute
- attr.SetKey(objectSDK.AttributeFilePath)
- attr.SetValue("/test/3/4")
- attrs := raw7.Attributes()
- attrs = append(attrs, attr)
- ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{
- ID: oidtest.ID(),
- Attributes: attrs,
- }, 0, 3, []byte{}, 0)
- raw7.SetECHeader(ech)
- putPrm.SetObject(raw7)
- _, err = db.Put(context.Background(), putPrm)
- require.NoError(t, err)
- var raw7Parent oid.Address
- raw7Parent.SetContainer(cnr)
- raw7Parent.SetObject(ech.Parent())
-
fs := objectSDK.SearchFilters{}
fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual)
- testSelect2(t, db, cnr, fs, index,
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw1),
object.AddressOf(raw2),
)
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringEqual)
- testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1))
+ testSelect(t, db, cnr, fs, object.AddressOf(raw1))
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringNotEqual)
- testSelect2(t, db, cnr, fs, index, object.AddressOf(raw2))
+ testSelect(t, db, cnr, fs, object.AddressOf(raw2))
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "b", objectSDK.MatchStringEqual)
- testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3))
+ testSelect(t, db, cnr, fs, object.AddressOf(raw3))
fs = objectSDK.SearchFilters{}
fs.AddFilter("c", "d", objectSDK.MatchStringEqual)
- testSelect2(t, db, cnr, fs, index)
+ testSelect(t, db, cnr, fs)
fs = objectSDK.SearchFilters{}
fs.AddFilter("foo", "", objectSDK.MatchNotPresent)
- testSelect2(t, db, cnr, fs, index,
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw3),
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
- object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "", objectSDK.MatchNotPresent)
- testSelect2(t, db, cnr, fs, index,
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
- object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
- testSelect2(t, db, cnr, fs, index,
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
- object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
fs.AddFilter("key", "", objectSDK.MatchNotPresent)
- testSelect2(t, db, cnr, fs, index,
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
- object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter(objectSDK.AttributeFilePath, "/test", objectSDK.MatchCommonPrefix)
- testSelect2(t, db, cnr, fs, index,
+ fs.AddFilter("path", "test", objectSDK.MatchCommonPrefix)
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
- raw7Parent,
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter(objectSDK.AttributeFilePath, "/test/1", objectSDK.MatchCommonPrefix)
- testSelect2(t, db, cnr, fs, index,
+ fs.AddFilter("path", "test/1", objectSDK.MatchCommonPrefix)
+ testSelect(t, db, cnr, fs,
object.AddressOf(raw4),
object.AddressOf(raw5),
)
-
- fs = objectSDK.SearchFilters{}
- fs.AddFilter(objectSDK.AttributeFilePath, "/test/3/4", objectSDK.MatchStringEqual)
- testSelect2(t, db, cnr, fs, index,
- raw7Parent,
- )
}
func TestDB_SelectRootPhyParent(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -354,7 +300,6 @@ func TestDB_SelectInhume(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -372,7 +317,11 @@ func TestDB_SelectInhume(t *testing.T) {
object.AddressOf(raw2),
)
- err = metaInhume(db, object.AddressOf(raw2), oidtest.ID())
+ var tombstone oid.Address
+ tombstone.SetContainer(cnr)
+ tombstone.SetObject(oidtest.ID())
+
+ err = metaInhume(db, object.AddressOf(raw2), tombstone)
require.NoError(t, err)
fs = objectSDK.SearchFilters{}
@@ -385,7 +334,6 @@ func TestDB_SelectPayloadHash(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -456,7 +404,6 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -564,7 +511,6 @@ func TestDB_SelectObjectID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -676,383 +622,10 @@ func TestDB_SelectObjectID(t *testing.T) {
})
}
-func TestDB_SelectOwnerID(t *testing.T) {
- t.Parallel()
-
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
-
- // prepare
-
- parent := testutil.GenerateObjectWithCID(cnr)
-
- regular := testutil.GenerateObjectWithCID(cnr)
- idParent, _ := parent.ID()
- regular.SetParentID(idParent)
- regular.SetParent(parent)
-
- err := putBig(db, regular)
- require.NoError(t, err)
-
- ts := testutil.GenerateObjectWithCID(cnr)
- ts.SetType(objectSDK.TypeTombstone)
- err = putBig(db, ts)
- require.NoError(t, err)
-
- lock := testutil.GenerateObjectWithCID(cnr)
- lock.SetType(objectSDK.TypeLock)
- err = putBig(db, lock)
- require.NoError(t, err)
-
- t.Run("not found objects", func(t *testing.T) {
- raw := testutil.GenerateObjectWithCID(cnr)
-
- fs := objectSDK.SearchFilters{}
- fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, raw.OwnerID())
-
- testSelect(t, db, cnr, fs)
-
- fs = objectSDK.SearchFilters{}
- fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, raw.OwnerID())
-
- testSelect(t, db, cnr, fs,
- object.AddressOf(regular),
- object.AddressOf(parent),
- object.AddressOf(ts),
- object.AddressOf(lock),
- )
- })
-
- t.Run("regular objects", func(t *testing.T) {
- fs := objectSDK.SearchFilters{}
- fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, regular.OwnerID())
- testSelect(t, db, cnr, fs, object.AddressOf(regular))
-
- fs = objectSDK.SearchFilters{}
- fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, regular.OwnerID())
- testSelect(t, db, cnr, fs,
- object.AddressOf(parent),
- object.AddressOf(ts),
- object.AddressOf(lock),
- )
- })
-
- t.Run("tombstone objects", func(t *testing.T) {
- fs := objectSDK.SearchFilters{}
- fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, ts.OwnerID())
- testSelect(t, db, cnr, fs, object.AddressOf(ts))
-
- fs = objectSDK.SearchFilters{}
- fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, ts.OwnerID())
- testSelect(t, db, cnr, fs,
- object.AddressOf(regular),
- object.AddressOf(parent),
- object.AddressOf(lock),
- )
- })
-
- t.Run("parent objects", func(t *testing.T) {
- fs := objectSDK.SearchFilters{}
- fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, parent.OwnerID())
- testSelect(t, db, cnr, fs, object.AddressOf(parent))
-
- fs = objectSDK.SearchFilters{}
- fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, parent.OwnerID())
- testSelect(t, db, cnr, fs,
- object.AddressOf(regular),
- object.AddressOf(ts),
- object.AddressOf(lock),
- )
- })
-
- t.Run("lock objects", func(t *testing.T) {
- fs := objectSDK.SearchFilters{}
- fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, lock.OwnerID())
- testSelect(t, db, cnr, fs, object.AddressOf(lock))
-
- fs = objectSDK.SearchFilters{}
- fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, lock.OwnerID())
- testSelect(t, db, cnr, fs,
- object.AddressOf(regular),
- object.AddressOf(parent),
- object.AddressOf(ts),
- )
- })
-}
-
-func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) {
- t.Parallel()
-
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
- ecChunk1 := oidtest.ID()
- ecChunk2 := oidtest.ID()
- ecParent := oidtest.ID()
- var ecParentAddr oid.Address
- ecParentAddr.SetContainer(cnr)
- ecParentAddr.SetObject(ecParent)
- var ecParentAttr []objectSDK.Attribute
- var attr objectSDK.Attribute
- attr.SetKey(objectSDK.AttributeFilePath)
- attr.SetValue("/1/2/3")
- ecParentAttr = append(ecParentAttr, attr)
-
- chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetID(ecChunk1)
- chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
- chunkObj.SetPayloadSize(uint64(5))
- chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0))
-
- chunkObj2 := testutil.GenerateObjectWithCID(cnr)
- chunkObj2.SetID(ecChunk2)
- chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
- chunkObj2.SetPayloadSize(uint64(10))
- chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 1, 3, []byte{}, 0))
-
- // put object with EC
-
- var prm meta.PutPrm
- prm.SetObject(chunkObj)
- _, err := db.Put(context.Background(), prm)
- require.NoError(t, err)
-
- prm.SetObject(chunkObj2)
- _, err = db.Put(context.Background(), prm)
- require.NoError(t, err)
-
- fs := objectSDK.SearchFilters{}
- fs.AddRootFilter()
- fs.AddFilter(objectSDK.AttributeFilePath, "/1/2/3", objectSDK.MatchCommonPrefix)
- testSelect(t, db, cnr, fs, ecParentAddr)
-}
-
-type testTarget struct {
- objects []*objectSDK.Object
-}
-
-func (tt *testTarget) WriteObject(_ context.Context, obj *objectSDK.Object) error {
- tt.objects = append(tt.objects, obj)
- return nil
-}
-
-func cutObject(t *testing.T, p transformer.ChunkedObjectWriter, hdr *objectSDK.Object, size int) *transformer.AccessIdentifiers {
- ctx := context.Background()
- require.NoError(t, p.WriteHeader(ctx, hdr))
-
- payload := make([]byte, size)
- rand.New(rand.NewSource(0)).Read(payload)
-
- _, err := p.Write(ctx, payload)
- require.NoError(t, err)
-
- ids, err := p.Close(ctx)
- require.NoError(t, err)
- return ids
-}
-
-func TestDB_RawHead_SplitInfo(t *testing.T) {
- t.Parallel()
-
- const (
- partSize = 10
- partCount = 2
- dataCount = 2
- parityCount = 1
- )
-
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- tt := new(testTarget)
- p := transformer.NewPayloadSizeLimiter(transformer.Params{
- Key: &pk.PrivateKey,
- NextTargetInit: func() transformer.ObjectWriter { return tt },
- NetworkState: epochState{e: 1},
- MaxSize: partSize,
- })
-
- hdr := objectSDK.New()
- hdr.SetContainerID(cnr)
- hdr.SetOwnerID(usertest.ID())
- ids := cutObject(t, p, hdr, partSize*partCount)
- require.Equal(t, len(tt.objects), partCount+1)
-
- t.Run("rep", func(t *testing.T) {
- testGetRawSplitInfo(t, cnr, ids, tt.objects[partCount], tt.objects[partCount-1])
- })
- t.Run("with ec", func(t *testing.T) {
- ec, err := erasurecode.NewConstructor(dataCount, parityCount)
- require.NoError(t, err)
-
- cs, err := ec.Split(tt.objects[partCount-1], &pk.PrivateKey)
- require.NoError(t, err)
-
- testGetRawSplitInfo(t, cnr, ids, tt.objects[partCount], cs[0])
- })
-}
-
-func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIdentifiers, linking, lastPart *objectSDK.Object) {
- expectedLinkID, ok := linking.ID()
- require.True(t, ok)
-
- t.Run("first last, then linking", func(t *testing.T) {
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- require.NoError(t, metaPut(db, lastPart, nil))
- require.NoError(t, metaPut(db, linking, nil))
-
- var addr oid.Address
- addr.SetContainer(cnr)
- addr.SetObject(*ids.ParentID)
-
- _, err := metaGet(db, addr, true)
-
- var siErr *objectSDK.SplitInfoError
- require.ErrorAs(t, err, &siErr)
-
- lastID, ok := siErr.SplitInfo().LastPart()
- require.True(t, ok)
- require.Equal(t, ids.SelfID, lastID)
-
- linkID, ok := siErr.SplitInfo().Link()
- require.True(t, ok)
- require.Equal(t, expectedLinkID, linkID)
- })
- t.Run("first linking, then last", func(t *testing.T) {
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- require.NoError(t, metaPut(db, linking, nil))
- require.NoError(t, metaPut(db, lastPart, nil))
-
- var addr oid.Address
- addr.SetContainer(cnr)
- addr.SetObject(*ids.ParentID)
-
- _, err := metaGet(db, addr, true)
-
- var siErr *objectSDK.SplitInfoError
- require.ErrorAs(t, err, &siErr)
-
- lastID, ok := siErr.SplitInfo().LastPart()
- require.True(t, ok)
- require.Equal(t, ids.SelfID, lastID)
-
- linkID, ok := siErr.SplitInfo().Link()
- require.True(t, ok)
- require.Equal(t, expectedLinkID, linkID)
- })
- t.Run("only last part", func(t *testing.T) {
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- require.NoError(t, metaPut(db, lastPart, nil))
-
- var addr oid.Address
- addr.SetContainer(cnr)
- addr.SetObject(*ids.ParentID)
-
- _, err := metaGet(db, addr, true)
-
- var siErr *objectSDK.SplitInfoError
- require.ErrorAs(t, err, &siErr)
-
- lastPart, ok := siErr.SplitInfo().LastPart()
- require.True(t, ok)
- require.Equal(t, ids.SelfID, lastPart)
- })
-}
-
-func TestDB_SelectSplitID_EC(t *testing.T) {
- t.Parallel()
-
- const (
- partSize = 10
- partCount = 2
- dataCount = 2
- parityCount = 1
- )
-
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- cnr := cidtest.ID()
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- tt := new(testTarget)
- p := transformer.NewPayloadSizeLimiter(transformer.Params{
- Key: &pk.PrivateKey,
- NextTargetInit: func() transformer.ObjectWriter { return tt },
- NetworkState: epochState{e: 1},
- MaxSize: partSize,
- })
-
- hdr := objectSDK.New()
- hdr.SetContainerID(cnr)
- hdr.SetOwnerID(usertest.ID())
- cutObject(t, p, hdr, partSize*partCount)
- require.Equal(t, len(tt.objects), partCount+1)
-
- split := tt.objects[0].SplitID()
- require.NotNil(t, split)
-
- ec, err := erasurecode.NewConstructor(dataCount, parityCount)
- require.NoError(t, err)
-
- for i := range partCount {
- cs, err := ec.Split(tt.objects[i], &pk.PrivateKey)
- require.NoError(t, err)
-
- require.NoError(t, putBig(db, cs[0]))
- }
-
- t.Run("not present", func(t *testing.T) {
- fs := objectSDK.SearchFilters{}
- fs.AddFilter(v2object.FilterHeaderSplitID, "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs)
- })
-
- t.Run("split id", func(t *testing.T) {
- fs := objectSDK.SearchFilters{}
- fs.AddFilter(v2object.FilterHeaderSplitID, split.String(), objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs,
- object.AddressOf(tt.objects[0]),
- object.AddressOf(tt.objects[1]),
- )
- })
-
- t.Run("empty split", func(t *testing.T) {
- fs := objectSDK.SearchFilters{}
- fs.AddFilter(v2object.FilterHeaderSplitID, "", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs)
- })
-
- t.Run("unknown split id", func(t *testing.T) {
- fs := objectSDK.SearchFilters{}
- fs.AddFilter(v2object.FilterHeaderSplitID,
- objectSDK.NewSplitID().String(),
- objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs)
- })
-}
-
func TestDB_SelectSplitID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -1109,7 +682,6 @@ func TestDB_SelectContainerID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -1157,11 +729,9 @@ func TestDB_SelectContainerID(t *testing.T) {
func BenchmarkSelect(b *testing.B) {
const objCount = 1000
db := newDB(b)
- defer func() { require.NoError(b, db.Close(context.Background())) }()
-
cid := cidtest.ID()
- for i := range objCount {
+ for i := 0; i < objCount; i++ {
var attr objectSDK.Attribute
attr.SetKey("myHeader")
attr.SetValue(strconv.Itoa(i))
@@ -1199,30 +769,27 @@ func TestExpiredObjects(t *testing.T) {
t.Parallel()
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close(context.Background())) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
cidExp, _ := exp.ContainerID()
cidNonExp, _ := nonExp.ContainerID()
- objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{}, false)
+ objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{})
require.NoError(t, err)
require.Empty(t, objs) // expired object should not be returned
- objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}, false)
+ objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{})
require.NoError(t, err)
require.NotEmpty(t, objs)
})
}
func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) {
- b.ReportAllocs()
-
var prm meta.SelectPrm
prm.SetContainerID(cid)
prm.SetFilters(fs)
- for range b.N {
+ for i := 0; i < b.N; i++ {
res, err := db.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
@@ -1233,11 +800,10 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
}
}
-func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters, useAttributeIndex bool) ([]oid.Address, error) {
+func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
var prm meta.SelectPrm
prm.SetFilters(fs)
prm.SetContainerID(cnr)
- prm.SetUseAttributeIndex(useAttributeIndex)
res, err := db.Select(context.Background(), prm)
return res.AddressList(), err
diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go
index 72618b1a0..f60a4724d 100644
--- a/pkg/local_object_storage/metabase/shard_id.go
+++ b/pkg/local_object_storage/metabase/shard_id.go
@@ -1,14 +1,8 @@
package meta
import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "os"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
- metamode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
)
@@ -17,76 +11,38 @@ var (
shardIDKey = []byte("id")
)
-// GetShardID sets metabase operation mode
-// and reads shard id from db.
-// If id is missing, returns nil, nil.
-//
-// GetShardID does not report any metrics.
-func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error) {
- db.modeMtx.Lock()
- defer db.modeMtx.Unlock()
- db.mode = mode
-
- if _, err := os.Stat(db.info.Path); errors.Is(err, os.ErrNotExist) {
- return nil, nil
- }
-
- if err := db.openDB(ctx, mode); err != nil {
- return nil, fmt.Errorf("open metabase: %w", err)
- }
-
- id, err := db.readShardID()
-
- if cErr := db.close(); cErr != nil {
- err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
- }
-
- return id, metaerr.Wrap(err)
-}
-
// ReadShardID reads shard id from db.
// If id is missing, returns nil, nil.
-func (db *DB) readShardID() ([]byte, error) {
+func (db *DB) ReadShardID() ([]byte, error) {
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
+
var id []byte
err := db.boltDB.View(func(tx *bbolt.Tx) error {
b := tx.Bucket(shardInfoBucket)
if b != nil {
- id = bytes.Clone(b.Get(shardIDKey))
+ id = slice.Copy(b.Get(shardIDKey))
}
return nil
})
return id, metaerr.Wrap(err)
}
-// SetShardID sets metabase operation mode
-// and writes shard id to db.
-func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) error {
- db.modeMtx.Lock()
- defer db.modeMtx.Unlock()
- db.mode = mode
+// WriteShardID writes shard id to db.
+func (db *DB) WriteShardID(id []byte) error {
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
- if mode.ReadOnly() {
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ } else if db.mode.ReadOnly() {
return ErrReadOnlyMode
}
- if err := db.openDB(ctx, mode); err != nil {
- return fmt.Errorf("open metabase: %w", err)
- }
-
- err := db.writeShardID(id)
- if err == nil {
- db.metrics.SetMode(metamode.ConvertToComponentModeDegraded(mode))
- }
-
- if cErr := db.close(); cErr != nil {
- err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
- }
-
- return metaerr.Wrap(err)
-}
-
-// writeShardID writes shard id to db.
-func (db *DB) writeShardID(id []byte) error {
return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
if err != nil {
diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go
index 8f2376503..6ba5a60cb 100644
--- a/pkg/local_object_storage/metabase/storage_id.go
+++ b/pkg/local_object_storage/metabase/storage_id.go
@@ -1,13 +1,13 @@
package meta
import (
- "bytes"
"context"
- "time"
+ "errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -35,15 +35,7 @@ func (r StorageIDRes) StorageID() []byte {
// StorageID returns storage descriptor for objects from the blobstor.
// It is put together with the object can makes get/delete operation faster.
-func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("StorageID", time.Since(startedAt), success)
- }()
-
+func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.StorageID",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
@@ -53,32 +45,32 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, er
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
- var res StorageIDRes
if db.mode.NoMetabase() {
return res, ErrDegradedMode
}
- err := db.boltDB.View(func(tx *bbolt.Tx) error {
- res.id = db.storageID(tx, prm.addr)
- return nil
+ err = db.boltDB.View(func(tx *bbolt.Tx) error {
+ res.id, err = db.storageID(tx, prm.addr)
+
+ return err
})
- success = err == nil
+
return res, metaerr.Wrap(err)
}
-func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte {
+func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) ([]byte, error) {
key := make([]byte, bucketKeySize)
smallBucket := tx.Bucket(smallBucketName(addr.Container(), key))
if smallBucket == nil {
- return nil
+ return nil, nil
}
storageID := smallBucket.Get(objectKey(addr.Object(), key))
if storageID == nil {
- return nil
+ return nil, nil
}
- return bytes.Clone(storageID)
+ return slice.Copy(storageID), nil
}
// UpdateStorageIDPrm groups the parameters of UpdateStorageID operation.
@@ -101,22 +93,7 @@ func (p *UpdateStorageIDPrm) SetStorageID(id []byte) {
}
// UpdateStorageID updates storage descriptor for objects from the blobstor.
-func (db *DB) UpdateStorageID(ctx context.Context, prm UpdateStorageIDPrm) (res UpdateStorageIDRes, err error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- db.metrics.AddMethodDuration("UpdateStorageID", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "metabase.UpdateStorageID",
- trace.WithAttributes(
- attribute.String("address", prm.addr.EncodeToString()),
- attribute.String("storage_id", string(prm.id)),
- ))
- defer span.End()
-
+func (db *DB) UpdateStorageID(prm UpdateStorageIDPrm) (res UpdateStorageIDRes, err error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -126,9 +103,16 @@ func (db *DB) UpdateStorageID(ctx context.Context, prm UpdateStorageIDPrm) (res
return res, ErrReadOnlyMode
}
+ currEpoch := db.epochState.CurrentEpoch()
+
err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
- return setStorageID(tx, prm.addr, prm.id, true)
+ exists, err := db.exists(tx, prm.addr, currEpoch)
+ if err == nil && exists || errors.Is(err, ErrObjectIsExpired) {
+ err = updateStorageID(tx, prm.addr, prm.id)
+ }
+
+ return err
})
- success = err == nil
+
return res, metaerr.Wrap(err)
}
diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go
index fef680159..b3652a680 100644
--- a/pkg/local_object_storage/metabase/storage_id_test.go
+++ b/pkg/local_object_storage/metabase/storage_id_test.go
@@ -15,11 +15,9 @@ func TestDB_StorageID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
raw1 := testutil.GenerateObject()
raw2 := testutil.GenerateObject()
- deleted := testutil.GenerateObject()
storageID := []byte{1, 2, 3, 4}
@@ -36,15 +34,6 @@ func TestDB_StorageID(t *testing.T) {
err = putBig(db, raw2)
require.NoError(t, err)
- // put object with storageID and delete it
- err = metaPut(db, deleted, storageID)
- require.NoError(t, err)
-
- cnrID, ok := deleted.ContainerID()
- require.True(t, ok)
- ts := testutil.GenerateObjectWithCID(cnrID)
- require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts).Object()))
-
// check StorageID for object without storageID
fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2))
require.NoError(t, err)
@@ -55,58 +44,21 @@ func TestDB_StorageID(t *testing.T) {
require.NoError(t, err)
require.Equal(t, storageID, fetchedStorageID)
- // check StorageID for deleted object with storageID
- fetchedStorageID, err = metaStorageID(db, object.AddressOf(deleted))
- require.NoError(t, err)
- require.Equal(t, storageID, fetchedStorageID)
-
t.Run("update", func(t *testing.T) {
- storageID := []byte{1, 2, 3, 4, 5}
require.NoError(t, metaUpdateStorageID(db, object.AddressOf(raw2), storageID))
- require.NoError(t, metaUpdateStorageID(db, object.AddressOf(deleted), storageID))
fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2))
require.NoError(t, err)
require.Equal(t, storageID, fetchedStorageID)
-
- fetchedStorageID, err = metaStorageID(db, object.AddressOf(deleted))
- require.NoError(t, err)
- require.Equal(t, storageID, fetchedStorageID)
})
}
-func TestPutWritecacheDataRace(t *testing.T) {
- t.Parallel()
-
- db := newDB(t)
- defer func() { require.NoError(t, db.Close(context.Background())) }()
-
- putStorageID := []byte{1, 2, 3}
- wcStorageID := []byte{1, 2, 3, 4, 5}
- o := testutil.GenerateObject()
-
- fetchedStorageID, err := metaStorageID(db, object.AddressOf(o))
- require.NoError(t, err)
- require.Nil(t, fetchedStorageID)
-
- // writecache flushes object and updates storageID before object actually saved to the metabase
- metaUpdateStorageID(db, object.AddressOf(o), wcStorageID)
-
- // put object completes with writecache's storageID
- err = metaPut(db, o, putStorageID)
- require.NoError(t, err)
-
- fetchedStorageID, err = metaStorageID(db, object.AddressOf(o))
- require.NoError(t, err)
- require.Equal(t, wcStorageID, fetchedStorageID)
-}
-
func metaUpdateStorageID(db *meta.DB, addr oid.Address, id []byte) error {
var sidPrm meta.UpdateStorageIDPrm
sidPrm.SetAddress(addr)
sidPrm.SetStorageID(id)
- _, err := db.UpdateStorageID(context.Background(), sidPrm)
+ _, err := db.UpdateStorageID(sidPrm)
return err
}
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
deleted file mode 100644
index 4948f3424..000000000
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ /dev/null
@@ -1,602 +0,0 @@
-package meta
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "encoding/hex"
- "errors"
- "fmt"
- "os"
- "strconv"
- "sync/atomic"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
- "golang.org/x/sync/errgroup"
-)
-
-const (
- upgradeLogFrequency = 50_000
- upgradeWorkersCount = 1_000
- compactMaxTxSize = 256 << 20
- upgradeTimeout = 1 * time.Second
-)
-
-var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error{
- 2: upgradeFromV2ToV3,
- 3: func(_ context.Context, _ *bbolt.DB, _ container.InfoProvider, log func(a ...any)) error {
- log("metabase already upgraded")
- return nil
- },
-}
-
-func Upgrade(ctx context.Context, path string, compact bool, cs container.InfoProvider, log func(a ...any)) error {
- if _, err := os.Stat(path); err != nil {
- return fmt.Errorf("check metabase existence: %w", err)
- }
- opts := bbolt.DefaultOptions
- opts.Timeout = upgradeTimeout
- db, err := bbolt.Open(path, os.ModePerm, opts)
- if err != nil {
- return fmt.Errorf("open metabase: %w", err)
- }
- var version uint64
- if err := db.View(func(tx *bbolt.Tx) error {
- var e error
- version, e = currentVersion(tx)
- return e
- }); err != nil {
- return err
- }
- updater, found := updates[version]
- if !found {
- return fmt.Errorf("unsupported version %d: no update available", version)
- }
- if err := db.Update(func(tx *bbolt.Tx) error {
- b := tx.Bucket(shardInfoBucket)
- return b.Put(upgradeKey, zeroValue)
- }); err != nil {
- return fmt.Errorf("set upgrade key %w", err)
- }
- if err := updater(ctx, db, cs, log); err != nil {
- return fmt.Errorf("update metabase schema: %w", err)
- }
- if err := db.Update(func(tx *bbolt.Tx) error {
- b := tx.Bucket(shardInfoBucket)
- return b.Delete(upgradeKey)
- }); err != nil {
- return fmt.Errorf("delete upgrade key %w", err)
- }
- if compact {
- log("compacting metabase...")
- err := compactDB(db)
- if err != nil {
- return fmt.Errorf("compact metabase: %w", err)
- }
- log("metabase compacted")
- }
- return db.Close()
-}
-
-func compactDB(db *bbolt.DB) error {
- sourcePath := db.Path()
- tmpFileName := sourcePath + "." + time.Now().Format(time.RFC3339)
- f, err := os.Stat(sourcePath)
- if err != nil {
- return err
- }
- dst, err := bbolt.Open(tmpFileName, f.Mode(), &bbolt.Options{
- Timeout: 100 * time.Millisecond,
- NoSync: true,
- })
- if err != nil {
- return fmt.Errorf("open new metabase to compact: %w", err)
- }
- if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
- return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
- }
- if err := dst.Sync(); err != nil {
- return fmt.Errorf("sync compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
- }
- if err := dst.Close(); err != nil {
- return fmt.Errorf("close compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
- }
- if err := db.Close(); err != nil {
- return fmt.Errorf("close source metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
- }
- if err := os.Rename(tmpFileName, sourcePath); err != nil {
- return fmt.Errorf("replace source metabase with compacted: %w", errors.Join(err, os.Remove(tmpFileName)))
- }
- return nil
-}
-
-func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
- if err := createExpirationEpochBuckets(ctx, db, log); err != nil {
- return err
- }
- eg, ctx := errgroup.WithContext(ctx)
- eg.Go(func() error {
- return dropUserAttributes(ctx, db, cs, log)
- })
- eg.Go(func() error {
- return dropOwnerIDIndex(ctx, db, log)
- })
- eg.Go(func() error {
- return dropPayloadChecksumIndex(ctx, db, log)
- })
- if err := eg.Wait(); err != nil {
- return err
- }
- return db.Update(func(tx *bbolt.Tx) error {
- return updateVersion(tx, version)
- })
-}
-
-type objectIDToExpEpoch struct {
- containerID cid.ID
- objectID oid.ID
- expirationEpoch uint64
-}
-
-func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
- log("filling expiration epoch buckets...")
- if err := db.Update(func(tx *bbolt.Tx) error {
- _, err := tx.CreateBucketIfNotExists(expEpochToObjectBucketName)
- return err
- }); err != nil {
- return err
- }
- objects := make(chan objectIDToExpEpoch)
- eg, ctx := errgroup.WithContext(ctx)
- eg.Go(func() error {
- return selectObjectsWithExpirationEpoch(ctx, db, objects)
- })
- var count atomic.Uint64
- for range upgradeWorkersCount {
- eg.Go(func() error {
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- case obj, ok := <-objects:
- if !ok {
- return nil
- }
- if err := db.Batch(func(tx *bbolt.Tx) error {
- if err := putUniqueIndexItem(tx, namedBucketItem{
- name: expEpochToObjectBucketName,
- key: expirationEpochKey(obj.expirationEpoch, obj.containerID, obj.objectID),
- val: zeroValue,
- }); err != nil {
- return err
- }
- val := make([]byte, epochSize)
- binary.LittleEndian.PutUint64(val, obj.expirationEpoch)
- return putUniqueIndexItem(tx, namedBucketItem{
- name: objectToExpirationEpochBucketName(obj.containerID, make([]byte, bucketKeySize)),
- key: objectKey(obj.objectID, make([]byte, objectKeySize)),
- val: val,
- })
- }); err != nil {
- return err
- }
- }
- if c := count.Add(1); c%upgradeLogFrequency == 0 {
- log("expiration epoch filled for", c, "objects...")
- }
- }
- })
- }
- err := eg.Wait()
- if err != nil {
- log("expiration epoch buckets completed completed with error:", err)
- return err
- }
- log("filling expiration epoch buckets completed successfully, total", count.Load(), "objects")
- return nil
-}
-
-func selectObjectsWithExpirationEpoch(ctx context.Context, db *bbolt.DB, objects chan objectIDToExpEpoch) error {
- defer close(objects)
-
- const batchSize = 1000
- it := &objectsWithExpirationEpochBatchIterator{
- lastAttributeKey: usrAttrPrefix,
- }
- for {
- if err := getNextObjectsWithExpirationEpochBatch(ctx, db, it, batchSize); err != nil {
- return err
- }
- for _, item := range it.items {
- select {
- case <-ctx.Done():
- return ctx.Err()
- case objects <- item:
- }
- }
-
- if len(it.items) < batchSize {
- return nil
- }
- it.items = nil
- }
-}
-
-var (
- usrAttrPrefix = []byte{userAttributePrefix}
- errBatchSizeLimit = errors.New("batch size limit")
-)
-
-type objectsWithExpirationEpochBatchIterator struct {
- lastAttributeKey []byte
- lastAttributeValue []byte
- lastAttrKeyValueItem []byte
- items []objectIDToExpEpoch
-}
-
-// - {prefix}{containerID}{attributeKey} <- bucket
-// -- {attributeValue} <- bucket, expirationEpoch
-// --- {objectID}: zeroValue <- record
-
-func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, it *objectsWithExpirationEpochBatchIterator, batchSize int) error {
- seekAttrValue := it.lastAttributeValue
- seekAttrKVItem := it.lastAttrKeyValueItem
- err := db.View(func(tx *bbolt.Tx) error {
- attrKeyC := tx.Cursor()
- for attrKey, _ := attrKeyC.Seek(it.lastAttributeKey); attrKey != nil && bytes.HasPrefix(attrKey, usrAttrPrefix); attrKey, _ = attrKeyC.Next() {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- if len(attrKey) <= 1+cidSize {
- continue
- }
- attributeKey := string(attrKey[1+cidSize:])
- if attributeKey != objectV2.SysAttributeExpEpoch {
- continue
- }
- var containerID cid.ID
- if err := containerID.Decode(attrKey[1 : 1+cidSize]); err != nil {
- return fmt.Errorf("decode container id from user attribute bucket: %w", err)
- }
- if err := iterateExpirationAttributeKeyBucket(ctx, tx.Bucket(attrKey), it, batchSize, containerID, attrKey, seekAttrValue, seekAttrKVItem); err != nil {
- return err
- }
- seekAttrValue = nil
- seekAttrKVItem = nil
- }
- return nil
- })
- if err != nil && !errors.Is(err, errBatchSizeLimit) {
- return err
- }
- return nil
-}
-
-func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, it *objectsWithExpirationEpochBatchIterator, batchSize int, containerID cid.ID, attrKey, seekAttrValue, seekAttrKVItem []byte) error {
- attrValueC := b.Cursor()
- for attrValue, v := attrValueC.Seek(seekAttrValue); attrValue != nil; attrValue, v = attrValueC.Next() {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- if v != nil {
- continue // need to iterate over buckets, not records
- }
- expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
- if err != nil {
- return fmt.Errorf("parse expiration epoch: %w", err)
- }
- expirationEpochBucket := b.Bucket(attrValue)
- attrKeyValueC := expirationEpochBucket.Cursor()
- for attrKeyValueItem, v := attrKeyValueC.Seek(seekAttrKVItem); attrKeyValueItem != nil; attrKeyValueItem, v = attrKeyValueC.Next() {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- if v == nil {
- continue // need to iterate over records, not buckets
- }
- if bytes.Equal(it.lastAttributeKey, attrKey) && bytes.Equal(it.lastAttributeValue, attrValue) && bytes.Equal(it.lastAttrKeyValueItem, attrKeyValueItem) {
- continue
- }
- var objectID oid.ID
- if err := objectID.Decode(attrKeyValueItem); err != nil {
- return fmt.Errorf("decode object id from container '%s' expiration epoch %d: %w", containerID, expirationEpoch, err)
- }
- it.lastAttributeKey = bytes.Clone(attrKey)
- it.lastAttributeValue = bytes.Clone(attrValue)
- it.lastAttrKeyValueItem = bytes.Clone(attrKeyValueItem)
- it.items = append(it.items, objectIDToExpEpoch{
- containerID: containerID,
- objectID: objectID,
- expirationEpoch: expirationEpoch,
- })
- if len(it.items) == batchSize {
- return errBatchSizeLimit
- }
- }
- seekAttrKVItem = nil
- }
- return nil
-}
-
-func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
- log("deleting user attribute buckets...")
- const batch = 1000
- prefix := []byte{userAttributePrefix}
- last := prefix
- var count uint64
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- var keys [][]byte
- if err := db.View(func(tx *bbolt.Tx) error {
- c := tx.Cursor()
- for k, _ := c.Seek(last); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() {
- if bytes.Equal(last, k) {
- continue
- }
- keys = append(keys, bytes.Clone(k))
- }
- return nil
- }); err != nil {
- log("deleting user attribute buckets completed with an error:", err)
- return err
- }
- if len(keys) == 0 {
- log("deleting user attribute buckets completed successfully, deleted", count, "buckets")
- return nil
- }
- last = keys[len(keys)-1]
- cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys)
- if err != nil {
- log("deleting user attribute buckets completed with an error:", err)
- return err
- }
- count += cnt
- cnt, err = dropEmptyUserAttributeBuckets(ctx, db, keys)
- if err != nil {
- log("deleting user attribute buckets completed with an error:", err)
- return err
- }
- count += cnt
- log("deleted", count, "user attribute buckets")
- }
-}
-
-func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) {
- keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs)
- if err != nil {
- return 0, fmt.Errorf("select non indexed user attributes: %w", err)
- }
- if err := db.Batch(func(tx *bbolt.Tx) error {
- for _, k := range keysToDrop {
- if err := tx.DeleteBucket(k); err != nil {
- return err
- }
- }
- return nil
- }); err != nil {
- return 0, fmt.Errorf("drop non indexed user attributes: %w", err)
- }
- return uint64(len(keysToDrop)), nil
-}
-
-func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
- var keysToDrop [][]byte
- for _, key := range keys {
- attr, ok := attributeFromAttributeBucket(key)
- if !ok {
- return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
- }
- if !IsAtrributeIndexed(attr) {
- keysToDrop = append(keysToDrop, key)
- continue
- }
- contID, ok := cidFromAttributeBucket(key)
- if !ok {
- return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
- }
- info, err := cs.Info(ctx, contID)
- if err != nil {
- return nil, err
- }
- if info.Removed || !info.Indexed {
- keysToDrop = append(keysToDrop, key)
- }
- }
- return keysToDrop, nil
-}
-
-func dropEmptyUserAttributeBuckets(ctx context.Context, db *bbolt.DB, keys [][]byte) (uint64, error) {
- var dropBuckets [][]byte
- for _, key := range keys {
- select {
- case <-ctx.Done():
- return 0, ctx.Err()
- default:
- }
-
- if err := dropEmptyNestedBuckets(ctx, db, key); err != nil {
- return 0, err
- }
-
- empty, exists, err := bucketIsEmpty(db, key)
- if err != nil {
- return 0, err
- }
- if empty && exists {
- dropBuckets = append(dropBuckets, key)
- }
- }
- if len(dropBuckets) == 0 {
- return 0, nil
- }
- if err := db.Batch(func(tx *bbolt.Tx) error {
- for _, key := range dropBuckets {
- if err := tx.DeleteBucket(key); err != nil {
- return err
- }
- }
- return nil
- }); err != nil {
- return 0, fmt.Errorf("drop empty user attributes buckets: %w", err)
- }
- return uint64(len(dropBuckets)), nil
-}
-
-func bucketIsEmpty(db *bbolt.DB, bucketKey []byte) (bool, bool, error) {
- var empty bool
- var exists bool
- if err := db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(bucketKey)
- if b == nil {
- return nil
- }
- exists = true
- empty = !hasAnyItem(b)
- return nil
- }); err != nil {
- return false, false, fmt.Errorf("bucket empty check: %w", err)
- }
- return empty, exists, nil
-}
-
-func dropEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey []byte) error {
- var last []byte
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- var dropBuckets [][]byte
- var err error
- dropBuckets, last, err = selectEmptyNestedBuckets(ctx, db, rootBucketKey, last)
- if err != nil {
- return fmt.Errorf("select empty nested buckets: %w", err)
- }
- if len(dropBuckets) == 0 {
- return nil
- }
-
- if err := db.Batch(func(tx *bbolt.Tx) error {
- rootBucket := tx.Bucket(rootBucketKey)
- if rootBucket == nil {
- return nil
- }
- for _, sb := range dropBuckets {
- if err := rootBucket.DeleteBucket(sb); err != nil {
- return err
- }
- }
- return nil
- }); err != nil {
- return fmt.Errorf("drop empty nested buckets: %w", err)
- }
- }
-}
-
-func selectEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey, last []byte) ([][]byte, []byte, error) {
- const batchSize = 1000
- var result [][]byte
- if err := db.View(func(tx *bbolt.Tx) error {
- rootBucket := tx.Bucket(rootBucketKey)
- if rootBucket == nil {
- return nil
- }
- c := rootBucket.Cursor()
- for k, v := c.Seek(last); k != nil && len(result) < batchSize; k, v = c.Next() {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- if bytes.Equal(last, k) {
- continue
- }
- last = bytes.Clone(k)
- if v != nil { // record
- continue
- }
- nestedBucket := rootBucket.Bucket(k)
- if nestedBucket == nil {
- continue
- }
- if !hasAnyItem(nestedBucket) {
- result = append(result, bytes.Clone(k))
- }
- }
- return nil
- }); err != nil {
- return nil, nil, err
- }
- return result, last, nil
-}
-
-func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
- return dropBucketsByPrefix(ctx, db, []byte{ownerPrefix}, func(a ...any) {
- log(append([]any{"owner ID index:"}, a...)...)
- })
-}
-
-func dropPayloadChecksumIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
- return dropBucketsByPrefix(ctx, db, []byte{payloadHashPrefix}, func(a ...any) {
- log(append([]any{"payload checksum:"}, a...)...)
- })
-}
-
-func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log func(a ...any)) error {
- log("deleting buckets...")
- const batch = 1000
- var count uint64
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- var keys [][]byte
- if err := db.View(func(tx *bbolt.Tx) error {
- c := tx.Cursor()
- for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() {
- keys = append(keys, bytes.Clone(k))
- }
- return nil
- }); err != nil {
- log("deleting buckets completed with an error:", err)
- return err
- }
- if len(keys) == 0 {
- log("deleting buckets completed successfully, deleted", count, "buckets")
- return nil
- }
- if err := db.Batch(func(tx *bbolt.Tx) error {
- for _, k := range keys {
- if err := tx.DeleteBucket(k); err != nil {
- return err
- }
- }
- return nil
- }); err != nil {
- log("deleting buckets completed with an error:", err)
- return err
- }
- count += uint64(len(keys))
- log("deleted", count, "buckets")
- }
-}
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
deleted file mode 100644
index c90de4dd6..000000000
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ /dev/null
@@ -1,222 +0,0 @@
-//go:build integration
-
-package meta
-
-import (
- "context"
- "fmt"
- "io"
- "os"
- "strconv"
- "testing"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/stretchr/testify/require"
- "golang.org/x/sync/errgroup"
-)
-
-const upgradeFilePath = "/path/to/metabase.v2"
-
-func TestUpgradeV2ToV3(t *testing.T) {
- path := createTempCopy(t, upgradeFilePath)
- defer func() {
- require.NoError(t, os.Remove(path))
- }()
- db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t)))
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.ErrorIs(t, db.Init(context.Background()), ErrOutdatedVersion)
- require.NoError(t, db.Close(context.Background()))
- require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log))
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
- fmt.Println()
-}
-
-type testContainerInfoProvider struct{}
-
-func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) {
- return container.Info{}, nil
-}
-
-func createTempCopy(t *testing.T, path string) string {
- src, err := os.Open(path)
- require.NoError(t, err)
-
- tmpPath := upgradeFilePath + time.Now().Format(time.RFC3339)
- dest, err := os.Create(tmpPath)
- require.NoError(t, err)
-
- _, err = io.Copy(dest, src)
- require.NoError(t, err)
-
- require.NoError(t, src.Close())
- require.NoError(t, dest.Close())
-
- return tmpPath
-}
-
-func TestGenerateMetabaseFile(t *testing.T) {
- t.Skip("for generating db")
- const (
- containersCount = 10_000
- simpleObjectsCount = 500_000
- complexObjectsCount = 500_000 // x2
- deletedByGCMarksCount = 100_000
- deletedByTombstoneCount = 100_000 // x2
- lockedCount = 100_000 // x2
-
- allocSize = 128 << 20
- generateWorkersCount = 1_000
- minEpoch = 1_000
- maxFilename = 1_000
- maxStorageID = 10_000
- )
-
- db := New(WithPath(upgradeFilePath), WithEpochState(epochState{e: minEpoch}), WithLogger(test.NewLogger(t)))
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- db.boltDB.AllocSize = allocSize
- db.boltDB.NoSync = true
- require.NoError(t, db.Init(context.Background()))
- containers := make([]cid.ID, containersCount)
- for i := range containers {
- containers[i] = cidtest.ID()
- }
- oc, err := db.ObjectCounters()
- require.NoError(t, err)
- require.True(t, oc.IsZero())
- eg, ctx := errgroup.WithContext(context.Background())
- eg.SetLimit(generateWorkersCount)
- // simple objects
- for i := range simpleObjectsCount {
- i := i
- eg.Go(func() error {
- obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
- testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
- _, err := db.Put(ctx, PutPrm{
- obj: obj,
- id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
- })
- require.NoError(t, err)
- return nil
- })
- }
- require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects generated")
- eg, ctx = errgroup.WithContext(context.Background())
- eg.SetLimit(generateWorkersCount)
- // complex objects
- for i := range complexObjectsCount {
- i := i
- eg.Go(func() error {
- parent := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- child := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- child.SetParent(parent)
- idParent, _ := parent.ID()
- child.SetParentID(idParent)
- testutil.AddAttribute(child, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
- testutil.AddAttribute(parent, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
- testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
- testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
- _, err := db.Put(ctx, PutPrm{
- obj: child,
- })
- require.NoError(t, err)
- return nil
- })
- }
- require.NoError(t, eg.Wait())
- db.log.Info(ctx, "complex objects generated")
- eg, ctx = errgroup.WithContext(context.Background())
- eg.SetLimit(generateWorkersCount)
- // simple objects deleted by gc marks
- for i := range deletedByGCMarksCount {
- i := i
- eg.Go(func() error {
- obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
- _, err := db.Put(ctx, PutPrm{
- obj: obj,
- id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
- })
- require.NoError(t, err)
- _, err = db.Inhume(ctx, InhumePrm{
- target: []oid.Address{object.AddressOf(obj)},
- })
- require.NoError(t, err)
- return nil
- })
- }
- require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects deleted by gc marks generated")
- eg, ctx = errgroup.WithContext(context.Background())
- eg.SetLimit(10000)
- // simple objects deleted by tombstones
- for i := range deletedByTombstoneCount {
- i := i
- eg.Go(func() error {
- obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
- _, err := db.Put(ctx, PutPrm{
- obj: obj,
- id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
- })
- tomb := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- tomb.SetType(objectSDK.TypeTombstone)
- _, err = db.Put(ctx, PutPrm{
- obj: tomb,
- id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
- })
- require.NoError(t, err)
- tombAddr := object.AddressOf(tomb)
- _, err = db.Inhume(ctx, InhumePrm{
- target: []oid.Address{object.AddressOf(obj)},
- tomb: &tombAddr,
- })
- require.NoError(t, err)
- return nil
- })
- }
- require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects deleted by tombstones generated")
- eg, ctx = errgroup.WithContext(context.Background())
- eg.SetLimit(generateWorkersCount)
- // simple objects locked by locks
- for i := range lockedCount {
- i := i
- eg.Go(func() error {
- obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
- _, err := db.Put(ctx, PutPrm{
- obj: obj,
- id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
- })
- lock := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- lock.SetType(objectSDK.TypeLock)
- testutil.AddAttribute(lock, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
- _, err = db.Put(ctx, PutPrm{
- obj: lock,
- id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
- })
- require.NoError(t, err)
- err = db.Lock(ctx, containers[i%len(containers)], object.AddressOf(lock).Object(), []oid.ID{object.AddressOf(obj).Object()})
- require.NoError(t, err)
- return nil
- })
- }
- require.NoError(t, eg.Wait())
- db.log.Info(ctx, "simple objects locked by locks generated")
- require.NoError(t, db.boltDB.Sync())
- require.NoError(t, db.Close(context.Background()))
-}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 4ad83332b..c9d9bb947 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -1,12 +1,10 @@
package meta
import (
+ "bytes"
"crypto/sha256"
- "encoding/binary"
- "errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -21,15 +19,11 @@ var (
graveyardBucketName = []byte{graveyardPrefix}
// garbageBucketName stores rows with the objects that should be physically
// deleted by the node (Garbage Collector routine).
- garbageBucketName = []byte{garbagePrefix}
- toMoveItBucketName = []byte{toMoveItPrefix}
- containerVolumeBucketName = []byte{containerVolumePrefix}
- containerCounterBucketName = []byte{containerCountersPrefix}
- expEpochToObjectBucketName = []byte{expirationEpochToObjectPrefix}
+ garbageBucketName = []byte{garbagePrefix}
+ toMoveItBucketName = []byte{toMoveItPrefix}
+ containerVolumeBucketName = []byte{containerVolumePrefix}
zeroValue = []byte{0xFF}
-
- errInvalidLength = errors.New("invalid length")
)
// Prefix bytes for database keys. All ids and addresses are encoded in binary
@@ -92,12 +86,11 @@ const (
// FKBT index buckets.
// ====================
- // ownerPrefix was used for prefixing FKBT index buckets mapping owner to object IDs.
+ // ownerPrefix is used for prefixing FKBT index buckets mapping owner to object IDs.
// Key: owner ID
// Value: bucket containing object IDs as keys
- // removed in version 3
ownerPrefix
- // userAttributePrefix was used for prefixing FKBT index buckets containing objects.
+ // userAttributePrefix is used for prefixing FKBT index buckets containing objects.
// Key: attribute value
// Value: bucket containing object IDs as keys
userAttributePrefix
@@ -106,10 +99,9 @@ const (
// List index buckets.
// ====================
- // payloadHashPrefix was used for prefixing List index buckets mapping payload hash to a list of object IDs.
+ // payloadHashPrefix is used for prefixing List index buckets mapping payload hash to a list of object IDs.
// Key: payload hash
// Value: list of object IDs
- // removed in version 3
payloadHashPrefix
// parentPrefix is used for prefixing List index buckets mapping parent ID to a list of children IDs.
// Key: parent ID
@@ -119,26 +111,6 @@ const (
// Key: split ID
// Value: list of object IDs
splitPrefix
-
- // containerCountersPrefix is used for storing container object counters.
- // Key: container ID + type
- // Value: container size in bytes as little-endian uint64
- containerCountersPrefix
-
- // ecInfoPrefix is used for storing relation between EC parent id and chunk id.
- // Key: container ID + type
- // Value: Object id
- ecInfoPrefix
-
- // expirationEpochToObjectPrefix is used for storing relation between expiration epoch and object id.
- // Key: expiration epoch + object address
- // Value: zero
- expirationEpochToObjectPrefix
-
- // objectToExpirationEpochPrefix is used for storing relation between expiration epoch and object id.
- // Key: object address
- // Value: expiration epoch
- objectToExpirationEpochPrefix
)
const (
@@ -146,7 +118,6 @@ const (
bucketKeySize = 1 + cidSize
objectKeySize = sha256.Size
addressKeySize = cidSize + objectKeySize
- epochSize = 8
)
func bucketName(cnr cid.ID, prefix byte, key []byte) []byte {
@@ -170,26 +141,25 @@ func smallBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, smallPrefix, key)
}
-// attributeBucketName returns _.
+// attributeBucketName returns _attr_.
func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte {
key[0] = userAttributePrefix
cnr.Encode(key[1:])
return append(key[:bucketKeySize], attributeKey...)
}
-func cidFromAttributeBucket(bucketName []byte) (cid.ID, bool) {
- if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
- return cid.ID{}, false
+// returns from attributeBucketName result, nil otherwise.
+func cidFromAttributeBucket(val []byte, attributeKey string) []byte {
+ if len(val) < bucketKeySize || val[0] != userAttributePrefix || !bytes.Equal(val[bucketKeySize:], []byte(attributeKey)) {
+ return nil
}
- var result cid.ID
- return result, result.Decode(bucketName[1:bucketKeySize]) == nil
+
+ return val[1:bucketKeySize]
}
-func attributeFromAttributeBucket(bucketName []byte) (string, bool) {
- if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
- return "", false
- }
- return string(bucketName[bucketKeySize:]), true
+// payloadHashBucketName returns _payloadhash.
+func payloadHashBucketName(cnr cid.ID, key []byte) []byte {
+ return bucketName(cnr, payloadHashPrefix, key)
}
// rootBucketName returns _root.
@@ -197,6 +167,11 @@ func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
}
+// ownerBucketName returns _ownerid.
+func ownerBucketName(cnr cid.ID, key []byte) []byte {
+ return bucketName(cnr, ownerPrefix, key)
+}
+
// parentBucketName returns _parent.
func parentBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, parentPrefix, key)
@@ -207,40 +182,6 @@ func splitBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, splitPrefix, key)
}
-// ecInfoBucketName returns _ecinfo.
-func ecInfoBucketName(cnr cid.ID, key []byte) []byte {
- return bucketName(cnr, ecInfoPrefix, key)
-}
-
-// objectToExpirationEpochBucketName returns objectToExpirationEpochPrefix_.
-func objectToExpirationEpochBucketName(cnr cid.ID, key []byte) []byte {
- return bucketName(cnr, objectToExpirationEpochPrefix, key)
-}
-
-func expirationEpochKey(epoch uint64, cnr cid.ID, obj oid.ID) []byte {
- result := make([]byte, epochSize+addressKeySize)
- binary.BigEndian.PutUint64(result, epoch)
- cnr.Encode(result[epochSize:])
- obj.Encode(result[epochSize+cidSize:])
- return result
-}
-
-func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) {
- if len(key) != epochSize+addressKeySize {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("unexpected expiration epoch to object key length: %d", len(key))
- }
- epoch := binary.BigEndian.Uint64(key)
- var cnr cid.ID
- if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err)
- }
- var obj oid.ID
- if err := obj.Decode(key[epochSize+cidSize:]); err != nil {
- return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err)
- }
- return epoch, cnr, obj, nil
-}
-
// addressKey returns key for K-V tables when key is a whole address.
func addressKey(addr oid.Address, key []byte) []byte {
addr.Container().Encode(key)
@@ -251,7 +192,7 @@ func addressKey(addr oid.Address, key []byte) []byte {
// parses object address formed by addressKey.
func decodeAddressFromKey(dst *oid.Address, k []byte) error {
if len(k) != addressKeySize {
- return errInvalidLength
+ return fmt.Errorf("invalid length")
}
var cnr cid.ID
@@ -279,7 +220,9 @@ func objectKey(obj oid.ID, key []byte) []byte {
//
// firstIrregularObjectType(tx, cnr, obj) usage allows getting object type.
func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type {
- assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType")
+ if len(objs) == 0 {
+ panic("empty object list in firstIrregularObjectType")
+ }
var keys [2][1 + cidSize]byte
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index fbc0f1ad9..5748b64ee 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -2,7 +2,6 @@ package meta
import (
"encoding/binary"
- "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -10,22 +9,15 @@ import (
)
// version contains current metabase version.
-const version = 3
+const version = 2
-var (
- versionKey = []byte("version")
- upgradeKey = []byte("upgrade")
-)
+var versionKey = []byte("version")
// ErrOutdatedVersion is returned on initializing
// an existing metabase that is not compatible with
// the current code version.
var ErrOutdatedVersion = logicerr.New("invalid version, resynchronization is required")
-var ErrIncompletedUpgrade = logicerr.New("metabase upgrade is not completed")
-
-var errVersionUndefinedNoInfoBucket = errors.New("version undefined: no info bucket")
-
func checkVersion(tx *bbolt.Tx, initialized bool) error {
var knownVersion bool
@@ -40,10 +32,6 @@ func checkVersion(tx *bbolt.Tx, initialized bool) error {
return fmt.Errorf("%w: expected=%d, stored=%d", ErrOutdatedVersion, version, stored)
}
}
- data = b.Get(upgradeKey)
- if len(data) > 0 {
- return ErrIncompletedUpgrade
- }
}
if !initialized {
@@ -67,19 +55,7 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {
b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
if err != nil {
- return fmt.Errorf("create auxiliary bucket: %w", err)
+ return fmt.Errorf("can't create auxiliary bucket: %w", err)
}
return b.Put(versionKey, data)
}
-
-func currentVersion(tx *bbolt.Tx) (uint64, error) {
- b := tx.Bucket(shardInfoBucket)
- if b == nil {
- return 0, errVersionUndefinedNoInfoBucket
- }
- data := b.Get(versionKey)
- if len(data) != 8 {
- return 0, fmt.Errorf("version undefined: invalid version data length %d", len(data))
- }
- return binary.LittleEndian.Uint64(data), nil
-}
diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go
index b373fb32e..6f011c246 100644
--- a/pkg/local_object_storage/metabase/version_test.go
+++ b/pkg/local_object_storage/metabase/version_test.go
@@ -8,7 +8,6 @@ import (
"path/filepath"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
)
@@ -24,7 +23,7 @@ func TestVersion(t *testing.T) {
newDB := func(t *testing.T) *DB {
return New(WithPath(filepath.Join(dir, t.Name())),
- WithPermissions(0o600), WithEpochState(epochStateImpl{}))
+ WithPermissions(0600), WithEpochState(epochStateImpl{}))
}
check := func(t *testing.T, db *DB) {
require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
@@ -44,64 +43,46 @@ func TestVersion(t *testing.T) {
}
t.Run("simple", func(t *testing.T) {
db := newDB(t)
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Open(context.Background(), false))
+ require.NoError(t, db.Init())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
t.Run("reopen", func(t *testing.T) {
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Open(context.Background(), false))
+ require.NoError(t, db.Init())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
})
})
t.Run("old data", func(t *testing.T) {
db := newDB(t)
- require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite))
+ require.NoError(t, db.Open(context.Background(), false))
+ require.NoError(t, db.WriteShardID([]byte{1, 2, 3, 4}))
+ require.NoError(t, db.Close())
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Open(context.Background(), false))
+ require.NoError(t, db.Init())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
})
t.Run("invalid version", func(t *testing.T) {
db := newDB(t)
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Open(context.Background(), false))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return updateVersion(tx, version+1)
}))
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.Error(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Open(context.Background(), false))
+ require.Error(t, db.Init())
+ require.NoError(t, db.Close())
t.Run("reset", func(t *testing.T) {
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Open(context.Background(), false))
require.NoError(t, db.Reset())
check(t, db)
- require.NoError(t, db.Close(context.Background()))
+ require.NoError(t, db.Close())
})
})
- t.Run("incompleted upgrade", func(t *testing.T) {
- db := newDB(t)
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
-
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
- return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue)
- }))
- require.ErrorIs(t, db.Init(context.Background()), ErrIncompletedUpgrade)
- require.NoError(t, db.Close(context.Background()))
-
- require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
- return tx.Bucket(shardInfoBucket).Delete(upgradeKey)
- }))
- require.NoError(t, db.Init(context.Background()))
- require.NoError(t, db.Close(context.Background()))
- })
}
diff --git a/pkg/local_object_storage/metrics/blobovnicza.go b/pkg/local_object_storage/metrics/blobovnicza.go
index 460d6e638..0d0318b3b 100644
--- a/pkg/local_object_storage/metrics/blobovnicza.go
+++ b/pkg/local_object_storage/metrics/blobovnicza.go
@@ -3,10 +3,9 @@ package metrics
import (
"time"
- metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)
func NewBlobovniczaTreeMetrics(path string, m metrics_impl.BlobobvnizcaMetrics) blobovniczatree.Metrics {
@@ -35,26 +34,14 @@ func (m *blobovniczaTreeMetrics) SetParentID(parentID string) {
m.shardID = parentID
}
-func (m *blobovniczaTreeMetrics) SetMode(mod mode.ComponentMode) {
- m.m.SetBlobobvnizcaTreeMode(m.shardID, m.path, mod)
+func (m *blobovniczaTreeMetrics) SetMode(readOnly bool) {
+ m.m.SetBlobobvnizcaTreeMode(m.shardID, m.path, readOnly)
}
func (m *blobovniczaTreeMetrics) Close() {
m.m.CloseBlobobvnizcaTree(m.shardID, m.path)
}
-func (m *blobovniczaTreeMetrics) SetRebuildStatus(status string) {
- m.m.BlobovniczaTreeRebuildStatus(m.shardID, m.path, status)
-}
-
-func (m *blobovniczaTreeMetrics) SetRebuildPercent(value uint32) {
- m.m.BlobovniczaTreeRebuildPercent(m.shardID, m.path, value)
-}
-
-func (m *blobovniczaTreeMetrics) ObjectMoved(d time.Duration) {
- m.m.BlobovniczaTreeObjectMoved(m.shardID, m.path, d)
-}
-
func (m *blobovniczaTreeMetrics) Delete(d time.Duration, success, withStorageID bool) {
m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Delete", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID})
}
@@ -88,10 +75,6 @@ func (m *blobovniczaTreeMetrics) Put(d time.Duration, size int, success bool) {
}
}
-func (m *blobovniczaTreeMetrics) ObjectsCount(d time.Duration, success bool) {
- m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "ObjectsCount", d, success, metrics_impl.NullBool{})
-}
-
type blobovniczaMetrics struct {
m metrics_impl.BlobobvnizcaMetrics
shardID func() string
diff --git a/pkg/local_object_storage/metrics/blobstore.go b/pkg/local_object_storage/metrics/blobstore.go
index 9a41f01d0..48249e89c 100644
--- a/pkg/local_object_storage/metrics/blobstore.go
+++ b/pkg/local_object_storage/metrics/blobstore.go
@@ -3,8 +3,8 @@ package metrics
import (
"time"
- metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)
type blobstoreMetrics struct {
@@ -63,7 +63,3 @@ func (m *blobstoreMetrics) Put(d time.Duration, size int, success bool) {
m.m.AddPut(m.shardID, size)
}
}
-
-func (m *blobstoreMetrics) ObjectsCount(d time.Duration, success bool) {
- m.m.MethodDuration(m.shardID, "ObjectsCount", d, success, metrics_impl.NullBool{})
-}
diff --git a/pkg/local_object_storage/metrics/fstree.go b/pkg/local_object_storage/metrics/fstree.go
index d93363fa3..d3749d9bc 100644
--- a/pkg/local_object_storage/metrics/fstree.go
+++ b/pkg/local_object_storage/metrics/fstree.go
@@ -3,9 +3,8 @@ package metrics
import (
"time"
- metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)
func NewFSTreeMetricsWithoutShardID(path string, m metrics_impl.FSTreeMetrics) fstree.Metrics {
@@ -26,10 +25,9 @@ func (m *fstreeMetrics) SetParentID(parentID string) {
m.shardID = parentID
}
-func (m *fstreeMetrics) SetMode(mod mode.ComponentMode) {
- m.m.SetMode(m.shardID, m.path, mod)
+func (m *fstreeMetrics) SetMode(readOnly bool) {
+ m.m.SetMode(m.shardID, m.path, readOnly)
}
-
func (m *fstreeMetrics) Close() {
m.m.Close(m.shardID, m.path)
}
@@ -37,40 +35,27 @@ func (m *fstreeMetrics) Close() {
func (m *fstreeMetrics) Iterate(d time.Duration, success bool) {
m.m.MethodDuration(m.shardID, m.path, "Iterate", d, success)
}
-
-func (m *fstreeMetrics) IterateInfo(d time.Duration, success bool) {
- m.m.MethodDuration(m.shardID, m.path, "IterateInfo", d, success)
-}
-
func (m *fstreeMetrics) Delete(d time.Duration, success bool) {
m.m.MethodDuration(m.shardID, m.path, "Delete", d, success)
}
-
func (m *fstreeMetrics) Exists(d time.Duration, success bool) {
m.m.MethodDuration(m.shardID, m.path, "Exists", d, success)
}
-
func (m *fstreeMetrics) Put(d time.Duration, size int, success bool) {
m.m.MethodDuration(m.shardID, m.path, "Put", d, success)
if success {
m.m.AddPut(m.shardID, m.path, size)
}
}
-
func (m *fstreeMetrics) Get(d time.Duration, size int, success bool) {
m.m.MethodDuration(m.shardID, m.path, "Get", d, success)
if success {
m.m.AddGet(m.shardID, m.path, size)
}
}
-
func (m *fstreeMetrics) GetRange(d time.Duration, size int, success bool) {
m.m.MethodDuration(m.shardID, m.path, "GetRange", d, success)
if success {
m.m.AddGet(m.shardID, m.path, size)
}
}
-
-func (m *fstreeMetrics) ObjectsCount(d time.Duration, success bool) {
- m.m.MethodDuration(m.shardID, m.path, "ObjectsCount", d, success)
-}
diff --git a/pkg/local_object_storage/metrics/metabase.go b/pkg/local_object_storage/metrics/metabase.go
index e962e37cb..d0fb31936 100644
--- a/pkg/local_object_storage/metrics/metabase.go
+++ b/pkg/local_object_storage/metrics/metabase.go
@@ -3,9 +3,9 @@ package metrics
import (
"time"
- metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)
func NewMetabaseMetrics(path string, m metrics_impl.MetabaseMetrics) meta.Metrics {
@@ -26,7 +26,7 @@ func (m *metabaseMetrics) SetParentID(parentID string) {
m.shardID = parentID
}
-func (m *metabaseMetrics) SetMode(mode mode.ComponentMode) {
+func (m *metabaseMetrics) SetMode(mode mode.Mode) {
m.m.SetMode(m.shardID, m.path, mode.String())
}
diff --git a/pkg/local_object_storage/metrics/pilorama.go b/pkg/local_object_storage/metrics/pilorama.go
index 050b769a0..21f027a6e 100644
--- a/pkg/local_object_storage/metrics/pilorama.go
+++ b/pkg/local_object_storage/metrics/pilorama.go
@@ -3,9 +3,9 @@ package metrics
import (
"time"
- metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)
func NewPiloramaMetrics(m metrics_impl.PiloramaMetrics) pilorama.Metrics {
@@ -24,8 +24,8 @@ func (m *piloramaMetrics) SetParentID(id string) {
m.shardID = id
}
-func (m *piloramaMetrics) SetMode(mod mode.ComponentMode) {
- m.m.SetMode(m.shardID, mod)
+func (m *piloramaMetrics) SetMode(mode mode.Mode) {
+ m.m.SetMode(m.shardID, mode)
}
func (m *piloramaMetrics) Close() {
diff --git a/pkg/local_object_storage/pilorama/batch.go b/pkg/local_object_storage/pilorama/batch.go
index 4c5238921..c65488b74 100644
--- a/pkg/local_object_storage/pilorama/batch.go
+++ b/pkg/local_object_storage/pilorama/batch.go
@@ -1,9 +1,8 @@
package pilorama
import (
- "cmp"
"encoding/binary"
- "slices"
+ "sort"
"sync"
"time"
@@ -48,10 +47,10 @@ func (b *batch) run() {
// Sorting without a mutex is ok, because we append to this slice only if timer is non-nil.
// See (*boltForest).addBatch for details.
- slices.SortFunc(b.operations, func(mi, mj *Move) int {
- return cmp.Compare(mi.Time, mj.Time)
+ sort.Slice(b.operations, func(i, j int) bool {
+ return b.operations[i].Time < b.operations[j].Time
})
- b.operations = slices.CompactFunc(b.operations, func(x, y *Move) bool { return x.Time == y.Time })
+ b.operations = removeDuplicatesInPlace(b.operations)
// Our main use-case is addition of new items. In this case,
// we do not need to perform undo()/redo(), just do().
@@ -116,3 +115,15 @@ func (b *batch) run() {
b.results[i] <- err
}
}
+
+func removeDuplicatesInPlace(a []*Move) []*Move {
+ equalCount := 0
+ for i := 1; i < len(a); i++ {
+ if a[i].Time == a[i-1].Time {
+ equalCount++
+ } else {
+ a[i-equalCount] = a[i]
+ }
+ }
+ return a[:len(a)-equalCount]
+}
diff --git a/pkg/local_object_storage/pilorama/batch_test.go b/pkg/local_object_storage/pilorama/batch_test.go
new file mode 100644
index 000000000..931fce18c
--- /dev/null
+++ b/pkg/local_object_storage/pilorama/batch_test.go
@@ -0,0 +1,70 @@
+package pilorama
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func Test_removeDuplicatesInPlace(t *testing.T) {
+ testCases := []struct {
+ before []int
+ after []int
+ }{
+ {
+ before: []int{},
+ after: []int{},
+ },
+ {
+ before: []int{1},
+ after: []int{1},
+ },
+ {
+ before: []int{1, 2},
+ after: []int{1, 2},
+ },
+ {
+ before: []int{1, 2, 3},
+ after: []int{1, 2, 3},
+ },
+ {
+ before: []int{1, 1, 2},
+ after: []int{1, 2},
+ },
+ {
+ before: []int{1, 2, 2},
+ after: []int{1, 2},
+ },
+ {
+ before: []int{1, 2, 2, 3},
+ after: []int{1, 2, 3},
+ },
+ {
+ before: []int{1, 1, 1},
+ after: []int{1},
+ },
+ {
+ before: []int{1, 1, 2, 2},
+ after: []int{1, 2},
+ },
+ {
+ before: []int{1, 1, 1, 2, 3, 3, 3},
+ after: []int{1, 2, 3},
+ },
+ }
+
+ for _, tc := range testCases {
+ ops := make([]*Move, len(tc.before))
+ for i := range ops {
+ ops[i] = &Move{Meta: Meta{Time: Timestamp(tc.before[i])}}
+ }
+
+ expected := make([]*Move, len(tc.after))
+ for i := range expected {
+ expected[i] = &Move{Meta: Meta{Time: Timestamp(tc.after[i])}}
+ }
+
+ actual := removeDuplicatesInPlace(ops)
+ require.Equal(t, expected, actual, "%d", tc.before)
+ }
+}
diff --git a/pkg/local_object_storage/pilorama/bench_test.go b/pkg/local_object_storage/pilorama/bench_test.go
index 3156751f2..3d5ff1a7c 100644
--- a/pkg/local_object_storage/pilorama/bench_test.go
+++ b/pkg/local_object_storage/pilorama/bench_test.go
@@ -8,7 +8,6 @@ import (
"sync/atomic"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require"
)
@@ -27,11 +26,10 @@ func BenchmarkCreate(b *testing.B) {
f := NewBoltForest(
WithPath(filepath.Join(tmpDir, "test.db")),
WithMaxBatchSize(runtime.GOMAXPROCS(0)))
- require.NoError(b, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(b, f.Init(context.Background()))
- defer func() { require.NoError(b, f.Close(context.Background())) }()
-
+ require.NoError(b, f.Open(context.Background(), false))
+ require.NoError(b, f.Init())
b.Cleanup(func() {
+ require.NoError(b, f.Close())
require.NoError(b, os.RemoveAll(tmpDir))
})
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index 897b37ea0..2689e345a 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -9,7 +9,6 @@ import (
"math/rand"
"os"
"path/filepath"
- "slices"
"strconv"
"sync"
"time"
@@ -81,7 +80,6 @@ func NewBoltForest(opts ...Option) ForestStorage {
openFile: os.OpenFile,
metrics: &noopMetrics{},
},
- mode: mode.Disabled,
}
for i := range opts {
@@ -91,7 +89,7 @@ func NewBoltForest(opts ...Option) ForestStorage {
return &b
}
-func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
+func (t *boltForest) SetMode(m mode.Mode) error {
t.modeMtx.Lock()
defer t.modeMtx.Unlock()
@@ -99,36 +97,24 @@ func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
return nil
}
- err := t.Close(ctx)
+ err := t.Close()
if err == nil && !m.NoMetabase() {
- if err = t.openBolt(m); err == nil {
- err = t.Init(ctx)
+ if err = t.Open(context.TODO(), m.ReadOnly()); err == nil {
+ err = t.Init()
}
}
if err != nil {
- return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
+ return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
}
t.mode = m
- t.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
+ t.metrics.SetMode(m)
return nil
}
-
-func (t *boltForest) Open(_ context.Context, mode mode.Mode) error {
- t.modeMtx.Lock()
- defer t.modeMtx.Unlock()
- t.mode = mode
- if mode.NoMetabase() {
- return nil
- }
- return t.openBolt(mode)
-}
-
-func (t *boltForest) openBolt(m mode.Mode) error {
- readOnly := m.ReadOnly()
+func (t *boltForest) Open(_ context.Context, readOnly bool) error {
err := util.MkdirAllX(filepath.Dir(t.path), t.perm)
if err != nil {
- return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err))
+ return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err))
}
opts := *bbolt.DefaultOptions
@@ -139,16 +125,19 @@ func (t *boltForest) openBolt(m mode.Mode) error {
t.db, err = bbolt.Open(t.path, t.perm, &opts)
if err != nil {
- return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err))
+ return metaerr.Wrap(fmt.Errorf("can't open the pilorama DB: %w", err))
}
t.db.MaxBatchSize = t.maxBatchSize
t.db.MaxBatchDelay = t.maxBatchDelay
- t.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
+ m := mode.ReadWrite
+ if readOnly {
+ m = mode.ReadOnly
+ }
+ t.metrics.SetMode(m)
return nil
}
-
-func (t *boltForest) Init(context.Context) error {
+func (t *boltForest) Init() error {
if t.mode.NoMetabase() || t.db.IsReadOnly() {
return nil
}
@@ -161,8 +150,7 @@ func (t *boltForest) Init(context.Context) error {
return err
})
}
-
-func (t *boltForest) Close(context.Context) error {
+func (t *boltForest) Close() error {
var err error
if t.db != nil {
err = t.db.Close()
@@ -419,7 +407,10 @@ func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID stri
return err
}
- i, node := t.getPathPrefix(bTree, attr, path)
+ i, node, err := t.getPathPrefix(bTree, attr, path)
+ if err != nil {
+ return err
+ }
ts := t.getLatestTimestamp(bLog, d.Position, d.Size)
lm = make([]Move, len(path)-i+1)
@@ -555,132 +546,6 @@ func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string
return metaerr.Wrap(err)
}
-func (t *boltForest) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- t.metrics.AddMethodDuration("TreeApplyBatch", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyBatch",
- trace.WithAttributes(
- attribute.String("container_id", cnr.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- m, err := t.filterSeen(cnr, treeID, m)
- if err != nil {
- return err
- }
- if len(m) == 0 {
- success = true
- return nil
- }
-
- ch := make(chan error)
- b := &batch{
- forest: t,
- cid: cnr,
- treeID: treeID,
- results: []chan<- error{ch},
- operations: m,
- }
- go func() {
- b.run()
- }()
- err = <-ch
- success = err == nil
- return metaerr.Wrap(err)
-}
-
-func (t *boltForest) filterSeen(cnr cidSDK.ID, treeID string, m []*Move) ([]*Move, error) {
- t.modeMtx.RLock()
- defer t.modeMtx.RUnlock()
-
- if t.mode.NoMetabase() {
- return nil, ErrDegradedMode
- }
-
- ops := make([]*Move, 0, len(m))
- err := t.db.View(func(tx *bbolt.Tx) error {
- treeRoot := tx.Bucket(bucketName(cnr, treeID))
- if treeRoot == nil {
- ops = m
- return nil
- }
- b := treeRoot.Bucket(logBucket)
- for _, op := range m {
- var logKey [8]byte
- binary.BigEndian.PutUint64(logKey[:], op.Time)
- seen := b.Get(logKey[:]) != nil
- if !seen {
- ops = append(ops, op)
- }
- }
- return nil
- })
- if err != nil {
- return nil, metaerr.Wrap(err)
- }
- return ops, nil
-}
-
-// TreeApplyStream should be used with caution: this method locks other write transactions while `source` is not closed.
-func (t *boltForest) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- t.metrics.AddMethodDuration("TreeApplyStream", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyStream",
- trace.WithAttributes(
- attribute.String("container_id", cnr.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- t.modeMtx.RLock()
- defer t.modeMtx.RUnlock()
-
- if t.mode.NoMetabase() {
- return ErrDegradedMode
- } else if t.mode.ReadOnly() {
- return ErrReadOnlyMode
- }
-
- fullID := bucketName(cnr, treeID)
- err := metaerr.Wrap(t.db.Update(func(tx *bbolt.Tx) error {
- bLog, bTree, err := t.getTreeBuckets(tx, fullID)
- if err != nil {
- return err
- }
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- case m, ok := <-source:
- if !ok {
- return nil
- }
- var lm Move
- if e := t.applyOperation(bLog, bTree, []*Move{m}, &lm); e != nil {
- return e
- }
- }
- }
- }))
- success = err == nil
- return err
-}
-
func (t *boltForest) addBatch(cnr cidSDK.ID, treeID string, m *Move, ch chan error) {
t.mtx.Lock()
for i := 0; i < len(t.batches); i++ {
@@ -775,7 +640,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M
key, value = c.Prev()
}
- for i := range ms {
+ for i := 0; i < len(ms); i++ {
// Loop invariant: key represents the next stored timestamp after ms[i].Time.
// 2. Insert the operation.
@@ -977,7 +842,10 @@ func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID st
b := treeRoot.Bucket(dataBucket)
- i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1])
+ i, curNode, err := t.getPathPrefix(b, attr, path[:len(path)-1])
+ if err != nil {
+ return err
+ }
if i < len(path)-1 {
return nil
}
@@ -986,23 +854,21 @@ func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID st
c := b.Cursor()
- for i := range curNodes {
- attrKey := internalKey(nil, attr, path[len(path)-1], curNodes[i], 0)
- attrKey = attrKey[:len(attrKey)-8]
- childKey, _ := c.Seek(attrKey)
- for len(childKey) == len(attrKey)+8 && bytes.Equal(attrKey, childKey[:len(childKey)-8]) {
- child := binary.LittleEndian.Uint64(childKey[len(childKey)-8:])
- if latest {
- _, ts, _, _ := t.getState(b, stateKey(make([]byte, 9), child))
- if ts >= maxTimestamp {
- nodes = append(nodes[:0], child)
- maxTimestamp = ts
- }
- } else {
- nodes = append(nodes, child)
+ attrKey := internalKey(nil, attr, path[len(path)-1], curNode, 0)
+ attrKey = attrKey[:len(attrKey)-8]
+ childKey, _ := c.Seek(attrKey)
+ for len(childKey) == len(attrKey)+8 && bytes.Equal(attrKey, childKey[:len(childKey)-8]) {
+ child := binary.LittleEndian.Uint64(childKey[len(childKey)-8:])
+ if latest {
+ _, ts, _, _ := t.getState(b, stateKey(make([]byte, 9), child))
+ if ts >= maxTimestamp {
+ nodes = append(nodes[:0], child)
+ maxTimestamp = ts
}
- childKey, _ = c.Next()
+ } else {
+ nodes = append(nodes, child)
}
+ childKey, _ = c.Next()
}
return nil
}))
@@ -1058,194 +924,6 @@ func (t *boltForest) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID stri
return m, parentID, metaerr.Wrap(err)
}
-func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshold int) bool {
- key := make([]byte, 9)
- key[0] = 'c'
-
- count := 0
- for _, nodeID := range nodeIDs {
- binary.LittleEndian.PutUint64(key[1:], nodeID)
-
- c := b.Cursor()
- for k, _ := c.Seek(key); len(k) == childrenKeySize && binary.LittleEndian.Uint64(k[1:]) == nodeID; k, _ = c.Next() {
- if count++; count > threshold {
- return false
- }
- }
- }
- return true
-}
-
-// TreeSortedByFilename implements the Forest interface.
-func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- t.metrics.AddMethodDuration("TreeSortedByFilename", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeSortedByFilename",
- trace.WithAttributes(
- attribute.String("container_id", cid.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- t.modeMtx.RLock()
- defer t.modeMtx.RUnlock()
-
- if t.mode.NoMetabase() {
- return nil, last, ErrDegradedMode
- }
- if len(nodeIDs) == 0 {
- return nil, last, errors.New("empty node list")
- }
-
- h := newHeap(last, count)
- key := make([]byte, 9)
-
- var result []NodeInfo
- var fewChildren bool
-
- err := t.db.View(func(tx *bbolt.Tx) error {
- treeRoot := tx.Bucket(bucketName(cid, treeID))
- if treeRoot == nil {
- return ErrTreeNotFound
- }
-
- b := treeRoot.Bucket(dataBucket)
-
- // If the node is a leaf, we could scan all filenames in the tree.
- // To prevent this we first count the number of children: if it is less than
- // the number of nodes we need to return, fallback to TreeGetChildren() implementation.
- if fewChildren = t.hasFewChildren(b, nodeIDs, count); fewChildren {
- var err error
- result, err = t.getChildren(b, nodeIDs)
- return err
- }
-
- t.fillSortedChildren(b, nodeIDs, h)
-
- for info, ok := h.pop(); ok; info, ok = h.pop() {
- for _, id := range info.id {
- childInfo, err := t.getChildInfo(b, key, id)
- if err != nil {
- return err
- }
- result = append(result, childInfo)
- }
- }
- return nil
- })
-
- success = err == nil
- if err != nil {
- return nil, last, metaerr.Wrap(err)
- }
-
- if fewChildren {
- result = sortAndCut(result, last)
- }
- res := mergeNodeInfos(result)
- if len(res) > count {
- res = res[:count]
- }
- if len(res) != 0 {
- s := string(findAttr(res[len(res)-1].Meta, AttributeFilename))
- last = NewCursor(s, res[len(res)-1].LastChild())
- }
- return res, last, metaerr.Wrap(err)
-}
-
-func sortByFilename(nodes []NodeInfo) {
- slices.SortFunc(nodes, func(a, b NodeInfo) int {
- return bytes.Compare(a.Meta.GetAttr(AttributeFilename), b.Meta.GetAttr(AttributeFilename))
- })
-}
-
-func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo {
- var lastBytes []byte
- if last != nil {
- lastBytes = []byte(last.GetFilename())
- }
- sortByFilename(result)
-
- for i := range result {
- if lastBytes == nil || bytes.Compare(lastBytes, result[i].Meta.GetAttr(AttributeFilename)) == -1 {
- return result[i:]
- }
- }
- return nil
-}
-
-func (t *boltForest) getChildInfo(b *bbolt.Bucket, key []byte, childID Node) (NodeInfo, error) {
- childInfo := NodeInfo{ID: childID}
- parentID, _, metaBytes, found := t.getState(b, stateKey(key, childID))
- if found {
- childInfo.ParentID = parentID
- if err := childInfo.Meta.FromBytes(metaBytes); err != nil {
- return NodeInfo{}, err
- }
- }
- return childInfo, nil
-}
-
-func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *fixedHeap) {
- c := b.Cursor()
- prefix := internalKeyPrefix(nil, AttributeFilename)
-
- length := uint16(0)
- count := 0
-
- var nodes []uint64
- var lastFilename *string
- for k, _ := c.Seek(prefix); len(k) > 0 && k[0] == 'i'; k, _ = c.Next() {
- if len(k) < len(prefix)+2+16 {
- continue
- }
-
- parentID := binary.LittleEndian.Uint64(k[len(k)-16:])
- if !slices.Contains(nodeIDs, parentID) {
- continue
- }
-
- actualLength := binary.LittleEndian.Uint16(k[len(prefix):])
- childID := binary.LittleEndian.Uint64(k[len(k)-8:])
- filename := string(k[len(prefix)+2 : len(k)-16])
-
- if lastFilename == nil {
- lastFilename = &filename
- nodes = append(nodes, childID)
- } else if *lastFilename == filename {
- nodes = append(nodes, childID)
- } else {
- processed := h.push(nodes, *lastFilename)
- nodes = MultiNode{childID}
- lastFilename = &filename
- if actualLength != length {
- length = actualLength
- count = 1
- } else if processed {
- if count++; count > h.count {
- lastFilename = nil
- nodes = nil
- length = actualLength + 1
- count = 0
- c.Seek(binary.LittleEndian.AppendUint16(prefix, length))
- c.Prev() // c.Next() will be performed by for loop
- }
- }
- }
- }
-
- if len(nodes) != 0 && lastFilename != nil {
- h.push(nodes, *lastFilename)
- }
-}
-
// TreeGetChildren implements the Forest interface.
func (t *boltForest) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error) {
var (
@@ -1272,6 +950,10 @@ func (t *boltForest) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID
return nil, ErrDegradedMode
}
+ key := make([]byte, 9)
+ key[0] = 'c'
+ binary.LittleEndian.PutUint64(key[1:], nodeID)
+
var result []NodeInfo
err := t.db.View(func(tx *bbolt.Tx) error {
@@ -1281,34 +963,25 @@ func (t *boltForest) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID
}
b := treeRoot.Bucket(dataBucket)
-
- var err error
- result, err = t.getChildren(b, []Node{nodeID})
- return err
- })
- success = err == nil
- return result, metaerr.Wrap(err)
-}
-
-func (t *boltForest) getChildren(b *bbolt.Bucket, nodeIDs MultiNode) ([]NodeInfo, error) {
- var result []NodeInfo
-
- key := make([]byte, 9)
- for _, nodeID := range nodeIDs {
- key[0] = 'c'
- binary.LittleEndian.PutUint64(key[1:], nodeID)
-
c := b.Cursor()
for k, _ := c.Seek(key); len(k) == childrenKeySize && binary.LittleEndian.Uint64(k[1:]) == nodeID; k, _ = c.Next() {
childID := binary.LittleEndian.Uint64(k[9:])
- childInfo, err := t.getChildInfo(b, key, childID)
- if err != nil {
- return nil, err
+ childInfo := NodeInfo{
+ ID: childID,
+ }
+ parentID, _, metaBytes, found := t.getState(b, stateKey(key, childID))
+ if found {
+ childInfo.ParentID = parentID
+ if err := childInfo.Meta.FromBytes(metaBytes); err != nil {
+ return err
+ }
}
result = append(result, childInfo)
}
- }
- return result, nil
+ return nil
+ })
+ success = err == nil
+ return result, metaerr.Wrap(err)
}
// TreeList implements the Forest interface.
@@ -1354,7 +1027,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err
return nil
})
if err != nil {
- return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err))
+ return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err))
}
success = true
return ids, nil
@@ -1439,12 +1112,11 @@ func (t *boltForest) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string)
c := tx.Cursor()
prefix := make([]byte, 32)
cid.Encode(prefix)
- for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Seek(prefix) {
+ for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Next() {
err := tx.DeleteBucket(k)
if err != nil {
return err
}
- _, _ = c.First() // rewind the cursor to the root page
}
return nil
}
@@ -1458,98 +1130,7 @@ func (t *boltForest) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string)
return err
}
-// TreeListTrees implements ForestStorage.
-func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error) {
- var (
- startedAt = time.Now()
- success = false
- )
- defer func() {
- t.metrics.AddMethodDuration("TreeListTrees", time.Since(startedAt), success)
- }()
-
- _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeListTrees")
- defer span.End()
-
- t.modeMtx.RLock()
- defer t.modeMtx.RUnlock()
-
- if t.mode.NoMetabase() {
- return nil, ErrDegradedMode
- }
-
- batchSize := prm.BatchSize
- if batchSize <= 0 {
- batchSize = treeListTreesBatchSizeDefault
- }
- var res TreeListTreesResult
- err := metaerr.Wrap(t.db.View(func(tx *bbolt.Tx) error {
- c := tx.Cursor()
- checkNextPageToken := true
- for k, _ := c.Seek(prm.NextPageToken); k != nil; k, _ = c.Next() {
- if bytes.Equal(k, dataBucket) || bytes.Equal(k, logBucket) {
- continue
- }
-
- if checkNextPageToken && bytes.Equal(k, prm.NextPageToken) {
- checkNextPageToken = false
- continue
- }
-
- var contID cidSDK.ID
- if err := contID.Decode(k[:32]); err != nil {
- return fmt.Errorf("decode container ID: %w", err)
- }
- res.Items = append(res.Items, ContainerIDTreeID{
- CID: contID,
- TreeID: string(k[32:]),
- })
-
- if len(res.Items) == batchSize {
- res.NextPageToken = bytes.Clone(k)
- break
- }
- }
- return nil
- }))
- success = err == nil
- if err != nil {
- return nil, err
- }
- return &res, nil
-}
-
-func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node) {
- c := bTree.Cursor()
-
- var curNodes []Node
- nextNodes := []Node{RootID}
- var attrKey []byte
-
- for i := range path {
- curNodes, nextNodes = nextNodes, curNodes[:0]
- for j := range curNodes {
- attrKey = internalKey(attrKey, attr, path[i], curNodes[j], 0)
- attrKey = attrKey[:len(attrKey)-8]
-
- childKey, value := c.Seek(attrKey)
- for len(childKey) == len(attrKey)+8 && bytes.Equal(attrKey, childKey[:len(childKey)-8]) {
- if len(value) == 1 && value[0] == 1 {
- nextNodes = append(nextNodes, binary.LittleEndian.Uint64(childKey[len(childKey)-8:]))
- }
- childKey, value = c.Next()
- }
- }
-
- if len(nextNodes) == 0 {
- return i, curNodes
- }
- }
-
- return len(path), nextNodes
-}
-
-func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) {
+func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node, error) {
c := bTree.Cursor()
var curNode Node
@@ -1569,10 +1150,10 @@ loop:
childKey, value = c.Next()
}
- return i, curNode
+ return i, curNode, nil
}
- return len(path), curNode
+ return len(path), curNode, nil
}
func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
@@ -1582,12 +1163,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
func (t *boltForest) logFromBytes(lm *Move, data []byte) error {
lm.Child = binary.LittleEndian.Uint64(data)
lm.Parent = binary.LittleEndian.Uint64(data[8:])
- return lm.FromBytes(data[16:])
+ return lm.Meta.FromBytes(data[16:])
}
func (t *boltForest) logToBytes(lm *Move) []byte {
w := io.NewBufBinWriter()
- size := 8 + 8 + lm.Size() + 1
+ size := 8 + 8 + lm.Meta.Size() + 1
// if lm.HasOld {
// size += 8 + lm.Old.Meta.Size()
// }
@@ -1595,7 +1176,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte {
w.Grow(size)
w.WriteU64LE(lm.Child)
w.WriteU64LE(lm.Parent)
- lm.EncodeBinary(w.BinWriter)
+ lm.Meta.EncodeBinary(w.BinWriter)
// w.WriteBool(lm.HasOld)
// if lm.HasOld {
// w.WriteU64LE(lm.Old.Parent)
@@ -1652,16 +1233,6 @@ func childrenKey(key []byte, child, parent Node) []byte {
return key[:childrenKeySize]
}
-func internalKeyPrefix(key []byte, k string) []byte {
- key = key[:0]
- key = append(key, 'i')
-
- l := len(k)
- key = binary.LittleEndian.AppendUint16(key, uint16(l))
- key = append(key, k...)
- return key
-}
-
// 'i' + attribute name (string) + attribute value (string) + parent (id) + node (id) -> 0/1.
func internalKey(key []byte, k, v string, parent, node Node) []byte {
size := 1 /* prefix */ + 2*2 /* len */ + 2*8 /* nodes */ + len(k) + len(v)
@@ -1669,13 +1240,22 @@ func internalKey(key []byte, k, v string, parent, node Node) []byte {
key = make([]byte, 0, size)
}
- key = internalKeyPrefix(key, k)
+ key = key[:0]
+ key = append(key, 'i')
- l := len(v)
- key = binary.LittleEndian.AppendUint16(key, uint16(l))
+ l := len(k)
+ key = append(key, byte(l), byte(l>>8))
+ key = append(key, k...)
+
+ l = len(v)
+ key = append(key, byte(l), byte(l>>8))
key = append(key, v...)
- key = binary.LittleEndian.AppendUint64(key, parent)
- key = binary.LittleEndian.AppendUint64(key, node)
+ var raw [8]byte
+ binary.LittleEndian.PutUint64(raw[:], parent)
+ key = append(key, raw[:]...)
+
+ binary.LittleEndian.PutUint64(raw[:], node)
+ key = append(key, raw[:]...)
return key
}
diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go
index ebfd0bcc0..e5612d2b9 100644
--- a/pkg/local_object_storage/pilorama/forest.go
+++ b/pkg/local_object_storage/pilorama/forest.go
@@ -2,9 +2,6 @@ package pilorama
import (
"context"
- "errors"
- "fmt"
- "slices"
"sort"
"strings"
@@ -12,8 +9,6 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
-var errInvalidKeyFormat = errors.New("invalid format: key must be cid and treeID")
-
// memoryForest represents multiple replicating trees sharing a single storage.
type memoryForest struct {
// treeMap maps tree identifier (container ID + name) to the replicated log.
@@ -76,8 +71,7 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID
Parent: node,
Meta: Meta{
Time: s.timestamp(d.Position, d.Size),
- Items: []KeyValue{{Key: attr, Value: []byte(path[j])}},
- },
+ Items: []KeyValue{{Key: attr, Value: []byte(path[j])}}},
Child: s.findSpareID(),
})
lm[j-i] = op.Move
@@ -85,7 +79,8 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID
s.operations = append(s.operations, op)
}
- mCopy := slices.Clone(m)
+ mCopy := make([]KeyValue, len(m))
+ copy(mCopy, m)
op := s.do(&Move{
Parent: node,
Meta: Meta{
@@ -111,28 +106,17 @@ func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, o
return s.Apply(op)
}
-func (f *memoryForest) TreeApplyBatch(ctx context.Context, cnr cid.ID, treeID string, ops []*Move) error {
- for _, op := range ops {
- if err := f.TreeApply(ctx, cnr, treeID, op, true); err != nil {
- return err
- }
- }
+func (f *memoryForest) Init() error {
return nil
}
-func (f *memoryForest) Init(context.Context) error {
+func (f *memoryForest) Open(context.Context, bool) error {
return nil
}
-
-func (f *memoryForest) Open(context.Context, mode.Mode) error {
+func (f *memoryForest) SetMode(mode.Mode) error {
return nil
}
-
-func (f *memoryForest) SetMode(context.Context, mode.Mode) error {
- return nil
-}
-
-func (f *memoryForest) Close(context.Context) error {
+func (f *memoryForest) Close() error {
return nil
}
func (f *memoryForest) SetParentID(string) {}
@@ -163,57 +147,6 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string,
return s.infoMap[nodeID].Meta, s.infoMap[nodeID].Parent, nil
}
-// TreeSortedByFilename implements the Forest interface.
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
- fullID := cid.String() + "/" + treeID
- s, ok := f.treeMap[fullID]
- if !ok {
- return nil, start, ErrTreeNotFound
- }
- if count == 0 {
- return nil, start, nil
- }
-
- var res []NodeInfo
-
- for _, nodeID := range nodeIDs {
- children := s.getChildren(nodeID)
- for _, childID := range children {
- var found bool
- for _, kv := range s.infoMap[childID].Meta.Items {
- if kv.Key == AttributeFilename {
- found = true
- break
- }
- }
- if !found {
- continue
- }
- res = append(res, NodeInfo{
- ID: childID,
- Meta: s.infoMap[childID].Meta,
- ParentID: s.infoMap[childID].Parent,
- })
- }
- }
- if len(res) == 0 {
- return nil, start, nil
- }
-
- sortByFilename(res)
-
- r := mergeNodeInfos(res)
- for i := range r {
- if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() {
- finish := min(len(res), i+count)
- last := string(findAttr(r[finish-1].Meta, AttributeFilename))
- return r[i:finish], NewCursor(last, 0), nil
- }
- }
- last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename))
- return nil, NewCursor(last, 0), nil
-}
-
// TreeGetChildren implements the Forest interface.
func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID string, nodeID Node) ([]NodeInfo, error) {
fullID := cid.String() + "/" + treeID
@@ -222,7 +155,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str
return nil, ErrTreeNotFound
}
- children := s.getChildren(nodeID)
+ children := s.tree.getChildren(nodeID)
res := make([]NodeInfo, 0, len(children))
for _, childID := range children {
res = append(res, NodeInfo{
@@ -324,77 +257,3 @@ func (f *memoryForest) TreeLastSyncHeight(_ context.Context, cid cid.ID, treeID
}
return t.syncHeight, nil
}
-
-// TreeListTrees implements Forest.
-func (f *memoryForest) TreeListTrees(_ context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error) {
- batchSize := prm.BatchSize
- if batchSize <= 0 {
- batchSize = treeListTreesBatchSizeDefault
- }
- tmpSlice := make([]string, 0, len(f.treeMap))
- for k := range f.treeMap {
- tmpSlice = append(tmpSlice, k)
- }
- sort.Strings(tmpSlice)
- var idx int
- if len(prm.NextPageToken) > 0 {
- last := string(prm.NextPageToken)
- idx, _ = sort.Find(len(tmpSlice), func(i int) int {
- return -1 * strings.Compare(tmpSlice[i], last)
- })
- if idx == len(tmpSlice) {
- return &TreeListTreesResult{}, nil
- }
- if tmpSlice[idx] == last {
- idx++
- }
- }
-
- var result TreeListTreesResult
- for idx < len(tmpSlice) {
- cidAndTree := strings.Split(tmpSlice[idx], "/")
- if len(cidAndTree) != 2 {
- return nil, errInvalidKeyFormat
- }
- var contID cid.ID
- if err := contID.DecodeString(cidAndTree[0]); err != nil {
- return nil, fmt.Errorf("invalid format: %w", err)
- }
-
- result.Items = append(result.Items, ContainerIDTreeID{
- CID: contID,
- TreeID: cidAndTree[1],
- })
-
- if len(result.Items) == batchSize {
- result.NextPageToken = []byte(tmpSlice[idx])
- break
- }
- idx++
- }
- return &result, nil
-}
-
-// TreeApplyStream implements ForestStorage.
-func (f *memoryForest) TreeApplyStream(ctx context.Context, cnr cid.ID, treeID string, source <-chan *Move) error {
- fullID := cnr.String() + "/" + treeID
- s, ok := f.treeMap[fullID]
- if !ok {
- s = newMemoryTree()
- f.treeMap[fullID] = s
- }
-
- for {
- select {
- case <-ctx.Done():
- return ctx.Err()
- case m, ok := <-source:
- if !ok {
- return nil
- }
- if e := s.Apply(m); e != nil {
- return e
- }
- }
- }
-}
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index 844084c55..8e7fec200 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -1,46 +1,47 @@
package pilorama
import (
- "bytes"
"context"
"crypto/rand"
"fmt"
mrand "math/rand"
"path/filepath"
- "slices"
"strconv"
- "strings"
"sync"
"testing"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "github.com/google/uuid"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
var providers = []struct {
name string
- construct func(t testing.TB, opts ...Option) ForestStorage
+ construct func(t testing.TB, opts ...Option) Forest
}{
- {"inmemory", func(t testing.TB, _ ...Option) ForestStorage {
+ {"inmemory", func(t testing.TB, _ ...Option) Forest {
f := NewMemoryForest()
- require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init(context.Background()))
+ require.NoError(t, f.Open(context.Background(), false))
+ require.NoError(t, f.Init())
+ t.Cleanup(func() {
+ require.NoError(t, f.Close())
+ })
+
return f
}},
- {"bbolt", func(t testing.TB, opts ...Option) ForestStorage {
+ {"bbolt", func(t testing.TB, opts ...Option) Forest {
f := NewBoltForest(
append([]Option{
WithPath(filepath.Join(t.TempDir(), "test.db")),
- WithMaxBatchSize(1),
- }, opts...)...)
- require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init(context.Background()))
+ WithMaxBatchSize(1)}, opts...)...)
+ require.NoError(t, f.Open(context.Background(), false))
+ require.NoError(t, f.Init())
+ t.Cleanup(func() {
+ require.NoError(t, f.Close())
+ })
return f
}},
}
@@ -60,17 +61,14 @@ func TestForest_TreeMove(t *testing.T) {
}
}
-func testForestTreeMove(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
+func testForestTreeMove(t *testing.T, s Forest) {
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
treeID := "version"
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
- {Key: AttributeFilename, Value: []byte("file.txt")},
- }
+ {Key: AttributeFilename, Value: []byte("file.txt")}}
lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 3, len(lm))
@@ -124,9 +122,7 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) {
}
}
-func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
+func testForestTreeGetChildren(t *testing.T, s Forest) {
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
treeID := "version"
@@ -182,268 +178,6 @@ func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
})
}
-func BenchmarkForestSortedIteration(b *testing.B) {
- for i := range providers {
- if providers[i].name == "inmemory" {
- continue
- }
-
- cnr := cidtest.ID()
- treeID := "version"
- f := providers[i].construct(b)
-
- const total = 100_000
- d := CIDDescriptor{cnr, 0, 1}
- for i := range total {
- u, err := uuid.NewRandom()
- if err != nil {
- b.FailNow()
- }
-
- _, err = f.TreeMove(context.Background(), d, treeID, &Move{
- Parent: RootID,
- Child: RootID + Node(i+1),
- Meta: Meta{
- Time: Timestamp(i + 1),
- Items: []KeyValue{{
- Key: AttributeFilename, Value: []byte(u.String()),
- }},
- },
- })
- if err != nil {
- b.FailNow()
- }
- }
-
- b.Run(providers[i].name+",root", func(b *testing.B) {
- for range b.N {
- res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{RootID}, nil, 100)
- if err != nil || len(res) != 100 {
- b.Fatalf("err %v, count %d", err, len(res))
- }
- }
- })
- b.Run(providers[i].name+",leaf", func(b *testing.B) {
- for range b.N {
- res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{1}, nil, 100)
- if err != nil || len(res) != 0 {
- b.FailNow()
- }
- }
- })
- }
-}
-
-// The issue which we call "BugWithSkip" is easiest to understand when filenames are
-// monotonically increasing numbers. We want the list of sorted filenames to have different length interleaved.
-// The bug happens when we switch between length during listing.
-// Thus this test contains numbers from 1 to 2000 and batch size of size 10.
-func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
- for i := range providers {
- t.Run(providers[i].name, func(t *testing.T) {
- testForestTreeSortedIterationBugWithSkip(t, providers[i].construct(t))
- })
- }
-}
-
-func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
- cid := cidtest.ID()
- d := CIDDescriptor{cid, 0, 1}
- treeID := "version"
- treeAdd := func(t *testing.T, ts int, filename string) {
- _, err := s.TreeMove(context.Background(), d, treeID, &Move{
- Child: RootID + uint64(ts),
- Parent: RootID,
- Meta: Meta{
- Time: Timestamp(ts),
- Items: []KeyValue{
- {Key: AttributeFilename, Value: []byte(filename)},
- },
- },
- })
- require.NoError(t, err)
- }
-
- const count = 2000
- treeAdd(t, 1, "")
- for i := 1; i < count; i++ {
- treeAdd(t, i+1, strconv.Itoa(i+1))
- }
-
- var result []MultiNodeInfo
- treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
- res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
- require.NoError(t, err)
- result = append(result, res...)
- return cursor
- }
-
- const batchSize = 10
- last := treeAppend(t, nil, batchSize)
- for i := 1; i < count/batchSize; i++ {
- last = treeAppend(t, last, batchSize)
- }
- require.Len(t, result, count)
- require.True(t, slices.IsSortedFunc(result, func(a, b MultiNodeInfo) int {
- filenameA := findAttr(a.Meta, AttributeFilename)
- filenameB := findAttr(b.Meta, AttributeFilename)
- return bytes.Compare(filenameA, filenameB)
- }))
-}
-
-func TestForest_TreeSortedIteration(t *testing.T) {
- for i := range providers {
- t.Run(providers[i].name, func(t *testing.T) {
- testForestTreeSortedIteration(t, providers[i].construct(t))
- })
- }
-}
-
-func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
- cid := cidtest.ID()
- d := CIDDescriptor{cid, 0, 1}
- treeID := "version"
- treeAdd := func(t *testing.T, ts int, filename string) {
- _, err := s.TreeMove(context.Background(), d, treeID, &Move{
- Child: RootID + uint64(ts),
- Parent: RootID,
- Meta: Meta{
- Time: Timestamp(ts),
- Items: []KeyValue{
- {Key: AttributeFilename, Value: []byte(filename)},
- },
- },
- })
- require.NoError(t, err)
- }
-
- const count = 9
- treeAdd(t, 1, "")
- for i := 1; i < count; i++ {
- treeAdd(t, i+1, strconv.Itoa(i+1))
- }
-
- var result []MultiNodeInfo
- treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
- res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
- require.NoError(t, err)
- result = append(result, res...)
- return cursor
- }
-
- last := treeAppend(t, nil, 2)
- last = treeAppend(t, last, 3)
- last = treeAppend(t, last, 0)
- last = treeAppend(t, last, 1)
- _ = treeAppend(t, last, 10)
-
- require.Len(t, result, count)
- for i := range result {
- require.Equal(t, MultiNode{RootID + uint64(i+1)}, result[i].Children)
- if i == 0 {
- require.Equal(t, "", string(findAttr(result[i].Meta, AttributeFilename)))
- } else {
- require.Equal(t, strconv.Itoa(RootID+i+1), string(findAttr(result[i].Meta, AttributeFilename)))
- }
- }
-}
-
-func TestForest_TreeSortedFilename(t *testing.T) {
- for i := range providers {
- t.Run(providers[i].name, func(t *testing.T) {
- testForestTreeSortedByFilename(t, providers[i].construct(t))
- })
- }
-}
-
-func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
- const controlAttr = "control_attr"
- cid := cidtest.ID()
- d := CIDDescriptor{cid, 0, 1}
- treeID := "version"
-
- treeAddByPath := func(t *testing.T, filename string) {
- path := strings.Split(filename, "/")
- _, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, path[:len(path)-1],
- []KeyValue{
- {Key: AttributeFilename, Value: []byte(path[len(path)-1])},
- {Key: controlAttr, Value: []byte(filename)},
- },
- )
- require.NoError(t, err)
- }
-
- expectAttributes := func(t *testing.T, attr string, expected []string, res []MultiNodeInfo) {
- require.Equal(t, len(expected), len(res))
-
- actual := make([]string, len(res))
- for i := range actual {
- actual[i] = string(findAttr(res[i].Meta, attr))
- }
- require.Equal(t, expected, actual)
- }
-
- items := []string{
- "a/bbb/ccc",
- "a/bbb/xxx",
- "a/bbb/z",
- "b/bbb/ccc",
- "b/xxx/z",
- "c",
- }
-
- // Ensure we do not depend on insertion order in any way.
- mrand.Shuffle(len(items), func(i, j int) {
- items[i], items[j] = items[j], items[i]
- })
- for i := range items {
- treeAddByPath(t, items[i])
- }
-
- getChildren := func(t *testing.T, id MultiNode) []MultiNodeInfo {
- res, _, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, id, nil, len(items))
- require.NoError(t, err)
- return res
- }
-
- res := getChildren(t, MultiNode{RootID})
- expectAttributes(t, AttributeFilename, []string{"a", "b", "c"}, res)
- expectAttributes(t, controlAttr, []string{"", "", "c"}, res)
-
- {
- ra := getChildren(t, res[0].Children)
- expectAttributes(t, AttributeFilename, []string{"bbb"}, ra)
- expectAttributes(t, controlAttr, []string{""}, ra)
-
- rabbb := getChildren(t, ra[0].Children)
- expectAttributes(t, AttributeFilename, []string{"ccc", "xxx", "z"}, rabbb)
- expectAttributes(t, controlAttr, []string{"a/bbb/ccc", "a/bbb/xxx", "a/bbb/z"}, rabbb)
- }
- {
- rb := getChildren(t, res[1].Children)
- expectAttributes(t, AttributeFilename, []string{"bbb", "xxx"}, rb)
- expectAttributes(t, controlAttr, []string{"", ""}, rb)
-
- rbbbb := getChildren(t, rb[0].Children)
- expectAttributes(t, AttributeFilename, []string{"ccc"}, rbbbb)
- expectAttributes(t, controlAttr, []string{"b/bbb/ccc"}, rbbbb)
-
- rbxxx := getChildren(t, rb[1].Children)
- expectAttributes(t, AttributeFilename, []string{"z"}, rbxxx)
- expectAttributes(t, controlAttr, []string{"b/xxx/z"}, rbxxx)
- }
- {
- rc := getChildren(t, res[2].Children)
- require.Len(t, rc, 0)
- }
-}
-
func TestForest_TreeDrop(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
@@ -452,9 +186,7 @@ func TestForest_TreeDrop(t *testing.T) {
}
}
-func testForestTreeDrop(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
+func testForestTreeDrop(t *testing.T, s Forest) {
const cidsSize = 3
var cids [cidsSize]cidSDK.ID
@@ -522,17 +254,14 @@ func TestForest_TreeAdd(t *testing.T) {
}
}
-func testForestTreeAdd(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
+func testForestTreeAdd(t *testing.T, s Forest) {
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
treeID := "version"
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
- {Key: AttributeFilename, Value: []byte("file.txt")},
- }
+ {Key: AttributeFilename, Value: []byte("file.txt")}}
m := &Move{
Parent: RootID,
Child: RootID,
@@ -570,17 +299,14 @@ func TestForest_TreeAddByPath(t *testing.T) {
}
}
-func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
+func testForestTreeAddByPath(t *testing.T, s Forest) {
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
treeID := "version"
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
- {Key: AttributeFilename, Value: []byte("file.txt")},
- }
+ {Key: AttributeFilename, Value: []byte("file.txt")}}
t.Run("invalid descriptor", func(t *testing.T) {
_, err := s.TreeAddByPath(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, AttributeFilename, []string{"yyy"}, meta)
@@ -655,8 +381,7 @@ func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
testMeta(t, s, cid, treeID, oldMove.Child, oldMove.Parent,
Meta{Time: oldMove.Time, Items: []KeyValue{
{AttributeVersion, []byte("SomeValue")},
- {AttributeFilename, []byte("another")},
- }})
+ {AttributeFilename, []byte("another")}}})
t.Run("get by path", func(t *testing.T) {
nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another"}, false)
@@ -674,8 +399,7 @@ func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
t.Run("empty component", func(t *testing.T) {
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
- {Key: AttributeFilename, Value: []byte{}},
- }
+ {Key: AttributeFilename, Value: []byte{}}}
lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
@@ -695,7 +419,7 @@ func TestForest_Apply(t *testing.T) {
}
}
-func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
+func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
cid := cidtest.ID()
treeID := "version"
@@ -709,8 +433,6 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
t.Run("add a child, then insert a parent removal", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}})
meta := Meta{Time: 3, Items: []KeyValue{{"child", []byte{3}}}}
@@ -722,7 +444,6 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
})
t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}}
testApply(t, s, 11, 10, meta)
@@ -742,7 +463,7 @@ func TestForest_ApplySameOperation(t *testing.T) {
}
}
-func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, parallel bool) {
+func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest, parallel bool) {
cid := cidtest.ID()
treeID := "version"
@@ -792,8 +513,6 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
t.Run("expected", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
for i := range logs {
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false))
}
@@ -801,10 +520,8 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
})
s := constructor(t, WithMaxBatchSize(batchSize))
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
- for range batchSize {
+ for i := 0; i < batchSize; i++ {
errG.Go(func() error {
return s.TreeApply(ctx, cid, treeID, &logs[2], false)
})
@@ -822,7 +539,7 @@ func TestForest_GetOpLog(t *testing.T) {
}
}
-func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
+func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
cid := cidtest.ID()
treeID := "version"
logs := []Move{
@@ -842,7 +559,6 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
}
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
t.Run("empty log, no panic", func(t *testing.T) {
_, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)
@@ -881,9 +597,8 @@ func TestForest_TreeExists(t *testing.T) {
}
}
-func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) {
+func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) Forest) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
actual, err := s.TreeExists(context.Background(), cid, treeID)
@@ -942,8 +657,6 @@ func TestApplyTricky1(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
@@ -1005,8 +718,6 @@ func TestApplyTricky2(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
@@ -1043,7 +754,7 @@ func TestForest_ParallelApply(t *testing.T) {
// The operations are guaranteed to be applied and returned sorted by `Time`.
func prepareRandomTree(nodeCount, opCount int) []Move {
ops := make([]Move, nodeCount+opCount)
- for i := range nodeCount {
+ for i := 0; i < nodeCount; i++ {
ops[i] = Move{
Parent: 0,
Meta: Meta{
@@ -1081,7 +792,7 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
}
func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) {
- for i := range uint64(nodeCount) {
+ for i := uint64(0); i < uint64(nodeCount); i++ {
expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i)
require.NoError(t, err)
actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i)
@@ -1104,7 +815,7 @@ func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID
}
}
-func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, batchSize, opCount, iterCount int) {
+func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest, batchSize, opCount, iterCount int) {
r := mrand.New(mrand.NewSource(42))
const nodeCount = 5
@@ -1115,20 +826,18 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close(context.Background())) }()
-
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
- for range iterCount {
+ for i := 0; i < iterCount; i++ {
// Shuffle random operations, leave initialization in place.
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
actual := constructor(t, WithMaxBatchSize(batchSize), WithNoSync(true))
wg := new(sync.WaitGroup)
ch := make(chan *Move)
- for range batchSize {
+ for i := 0; i < batchSize; i++ {
wg.Add(1)
go func() {
defer wg.Done()
@@ -1145,11 +854,10 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
wg.Wait()
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close(context.Background()))
}
}
-func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
+func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
r := mrand.New(mrand.NewSource(42))
const (
@@ -1163,14 +871,12 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close(context.Background())) }()
-
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
const iterCount = 200
- for range iterCount {
+ for i := 0; i < iterCount; i++ {
// Shuffle random operations, leave initialization in place.
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
@@ -1179,7 +885,6 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close(context.Background()))
}
}
@@ -1197,8 +902,6 @@ func BenchmarkApplySequential(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close(context.Background())) }()
-
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
for i := range ops {
@@ -1233,8 +936,6 @@ func BenchmarkApplyReorderLast(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close(context.Background())) }()
-
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
for i := range ops {
@@ -1247,7 +948,7 @@ func BenchmarkApplyReorderLast(b *testing.B) {
Child: uint64(r.Intn(benchNodeCount)),
}
if i != 0 && i%blockSize == 0 {
- for j := range blockSize / 2 {
+ for j := 0; j < blockSize/2; j++ {
ops[i-j], ops[i+j-blockSize] = ops[i+j-blockSize], ops[i-j]
}
}
@@ -1265,7 +966,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) {
cid := cidtest.ID()
treeID := "version"
ch := make(chan int, b.N)
- for i := range b.N {
+ for i := 0; i < b.N; i++ {
ch <- i
}
@@ -1289,8 +990,7 @@ func TestTreeGetByPath(t *testing.T) {
}
}
-func testTreeGetByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
+func testTreeGetByPath(t *testing.T, s Forest) {
cid := cidtest.ID()
treeID := "version"
@@ -1311,7 +1011,7 @@ func testTreeGetByPath(t *testing.T, s ForestStorage) {
if mf, ok := s.(*memoryForest); ok {
single := mf.treeMap[cid.String()+"/"+treeID]
t.Run("test meta", func(t *testing.T) {
- for i := range 6 {
+ for i := 0; i < 6; i++ {
require.Equal(t, uint64(i), single.infoMap[Node(i+1)].Meta.Time)
}
})
@@ -1368,9 +1068,7 @@ func TestGetTrees(t *testing.T) {
}
}
-func testTreeGetTrees(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close(context.Background())) }()
-
+func testTreeGetTrees(t *testing.T, s Forest) {
cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()}
d := CIDDescriptor{Position: 0, Size: 1}
@@ -1414,9 +1112,7 @@ func TestTreeLastSyncHeight(t *testing.T) {
}
}
-func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
- defer func() { require.NoError(t, f.Close(context.Background())) }()
-
+func testTreeLastSyncHeight(t *testing.T, f Forest) {
cnr := cidtest.ID()
treeID := "someTree"
@@ -1456,58 +1152,3 @@ func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
-
-func TestForest_ListTrees(t *testing.T) {
- for i := range providers {
- t.Run(providers[i].name, func(t *testing.T) {
- testTreeListTrees(t, providers[i].construct)
- })
- }
-}
-
-func testTreeListTrees(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
- batchSize := 10
- t.Run("empty", func(t *testing.T) {
- testTreeListTreesCount(t, constructor, batchSize, 0)
- })
- t.Run("count lower than batch size", func(t *testing.T) {
- testTreeListTreesCount(t, constructor, batchSize, batchSize-1)
- })
- t.Run("count equals batch size", func(t *testing.T) {
- testTreeListTreesCount(t, constructor, batchSize, batchSize)
- })
- t.Run("count greater than batch size", func(t *testing.T) {
- testTreeListTreesCount(t, constructor, batchSize, batchSize+1)
- })
- t.Run("count equals multiplied batch size", func(t *testing.T) {
- testTreeListTreesCount(t, constructor, batchSize, 3*batchSize)
- })
- t.Run("count equals multiplied batch size with addition", func(t *testing.T) {
- testTreeListTreesCount(t, constructor, batchSize, 3*batchSize+batchSize/2)
- })
-}
-
-func testTreeListTreesCount(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, batchSize, count int) {
- f := constructor(t)
- var expected []ContainerIDTreeID
-
- treeIDs := []string{"version", "system", "s", "avada kedavra"}
- for i := range count {
- cid := cidtest.ID()
- treeID := treeIDs[i%len(treeIDs)]
- expected = append(expected, ContainerIDTreeID{
- CID: cid,
- TreeID: treeID,
- })
-
- ops := prepareRandomTree(5, 5)
- for _, op := range ops {
- require.NoError(t, f.TreeApply(context.Background(), cid, treeID, &op, false))
- }
- }
-
- actual, err := treeListAll(context.Background(), f, batchSize)
- require.NoError(t, err)
-
- require.ElementsMatch(t, expected, actual)
-}
diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go
deleted file mode 100644
index b035be1e1..000000000
--- a/pkg/local_object_storage/pilorama/heap.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package pilorama
-
-import (
- "container/heap"
- "slices"
- "strings"
-)
-
-type heapInfo struct {
- id MultiNode
- filename string
-}
-
-type filenameHeap []heapInfo
-
-func (h filenameHeap) Len() int { return len(h) }
-func (h filenameHeap) Less(i, j int) bool { return h[i].filename < h[j].filename }
-func (h filenameHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
-func (h *filenameHeap) Push(x any) {
- *h = append(*h, x.(heapInfo))
-}
-
-func (h *filenameHeap) Pop() any {
- old := *h
- n := len(old)
- x := old[n-1]
- *h = old[0 : n-1]
- return x
-}
-
-// fixedHeap maintains a fixed number of smallest elements started at some point.
-type fixedHeap struct {
- start *Cursor
- sorted bool
- count int
- h *filenameHeap
-}
-
-func newHeap(start *Cursor, count int) *fixedHeap {
- h := new(filenameHeap)
- heap.Init(h)
-
- return &fixedHeap{
- start: start,
- count: count,
- h: h,
- }
-}
-
-const amortizationMultiplier = 5
-
-func (h *fixedHeap) push(id MultiNode, filename string) bool {
- if h.start != nil {
- if filename < h.start.GetFilename() {
- return false
- } else if filename == h.start.GetFilename() {
- // A tree may have a lot of nodes with the same filename but different versions so that
- // len(nodes) > batch_size. The cut nodes should be pushed into the result on repeated call
- // with the same filename.
- pos := slices.Index(id, h.start.GetNode())
- if pos == -1 || pos+1 >= len(id) {
- return false
- }
- id = id[pos+1:]
- }
- }
-
- *h.h = append(*h.h, heapInfo{id: id, filename: filename})
- h.sorted = false
-
- if h.h.Len() > h.count*amortizationMultiplier {
- slices.SortFunc(*h.h, func(a, b heapInfo) int {
- return strings.Compare(a.filename, b.filename)
- })
- *h.h = (*h.h)[:h.count]
- }
- return true
-}
-
-func (h *fixedHeap) pop() (heapInfo, bool) {
- if !h.sorted {
- slices.SortFunc(*h.h, func(a, b heapInfo) int {
- return strings.Compare(a.filename, b.filename)
- })
- if len(*h.h) > h.count {
- *h.h = (*h.h)[:h.count]
- }
- h.sorted = true
- }
- if len(*h.h) != 0 {
- info := (*h.h)[0]
- *h.h = (*h.h)[1:]
- return info, true
- }
- return heapInfo{}, false
-}
diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go
index 28b7faec8..c9f5df3b7 100644
--- a/pkg/local_object_storage/pilorama/inmemory.go
+++ b/pkg/local_object_storage/pilorama/inmemory.go
@@ -1,9 +1,6 @@
package pilorama
-import (
- "cmp"
- "slices"
-)
+import "sort"
// nodeInfo couples parent and metadata.
type nodeInfo struct {
@@ -35,9 +32,9 @@ func newMemoryTree() *memoryTree {
// undo un-does op and changes s in-place.
func (s *memoryTree) undo(op *move) {
if op.HasOld {
- s.infoMap[op.Child] = op.Old
+ s.tree.infoMap[op.Child] = op.Old
} else {
- delete(s.infoMap, op.Child)
+ delete(s.tree.infoMap, op.Child)
}
}
@@ -83,8 +80,8 @@ func (s *memoryTree) do(op *Move) move {
},
}
- shouldPut := !s.isAncestor(op.Child, op.Parent)
- p, ok := s.infoMap[op.Child]
+ shouldPut := !s.tree.isAncestor(op.Child, op.Parent)
+ p, ok := s.tree.infoMap[op.Child]
if ok {
lm.HasOld = true
lm.Old = p
@@ -100,7 +97,7 @@ func (s *memoryTree) do(op *Move) move {
p.Meta = m
p.Parent = op.Parent
- s.infoMap[op.Child] = p
+ s.tree.infoMap[op.Child] = p
return lm
}
@@ -134,10 +131,10 @@ func (t tree) getChildren(parent Node) []Node {
}
}
- slices.SortFunc(children, func(ci, cj uint64) int {
- a := t.infoMap[ci]
- b := t.infoMap[cj]
- return cmp.Compare(a.Meta.Time, b.Meta.Time)
+ sort.Slice(children, func(i, j int) bool {
+ a := t.infoMap[children[i]]
+ b := t.infoMap[children[j]]
+ return a.Meta.Time < b.Meta.Time
})
return children
}
@@ -192,7 +189,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
}
var nodes []Node
- var lastTS Timestamp
+ var lastTs Timestamp
children := t.getChildren(curNode)
for i := range children {
@@ -200,7 +197,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
fileName := string(info.Meta.GetAttr(attr))
if fileName == path[len(path)-1] {
if latest {
- if info.Meta.Time >= lastTS {
+ if info.Meta.Time >= lastTs {
nodes = append(nodes[:0], children[i])
}
} else {
diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go
index e1f6cd8e7..e7f7eb512 100644
--- a/pkg/local_object_storage/pilorama/interface.go
+++ b/pkg/local_object_storage/pilorama/interface.go
@@ -21,8 +21,6 @@ type Forest interface {
// TreeApply applies replicated operation from another node.
// If background is true, TreeApply will first check whether an operation exists.
TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
- // TreeApplyBatch applies replicated operations from another node.
- TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error
// TreeGetByPath returns all nodes corresponding to the path.
// The path is constructed by descending from the root using the values of the
// AttributeFilename in meta.
@@ -35,9 +33,6 @@ type Forest interface {
// TreeGetChildren returns children of the node with the specified ID. The order is arbitrary.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error)
- // TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute..
- // Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
- TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error)
// TreeGetOpLog returns first log operation stored at or above the height.
// In case no such operation is found, empty Move and nil error should be returned.
TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)
@@ -62,16 +57,12 @@ type Forest interface {
type ForestStorage interface {
// DumpInfo returns information about the pilorama.
DumpInfo() Info
- Init(context.Context) error
- Open(context.Context, mode.Mode) error
- Close(context.Context) error
- SetMode(context.Context, mode.Mode) error
+ Init() error
+ Open(context.Context, bool) error
+ Close() error
+ SetMode(m mode.Mode) error
SetParentID(id string)
Forest
-
- // TreeListTrees returns all pairs "containerID:treeID".
- TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error)
- TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error
}
const (
@@ -79,38 +70,6 @@ const (
AttributeVersion = "Version"
)
-// Cursor keeps state between function calls for traversing nodes.
-// It stores the attributes associated with a previous call, allowing subsequent operations
-// to resume traversal from this point rather than starting from the beginning.
-type Cursor struct {
- // Last traversed filename.
- filename string
-
- // Last traversed node.
- node Node
-}
-
-func NewCursor(filename string, node Node) *Cursor {
- return &Cursor{
- filename: filename,
- node: node,
- }
-}
-
-func (c *Cursor) GetFilename() string {
- if c == nil {
- return ""
- }
- return c.filename
-}
-
-func (c *Cursor) GetNode() Node {
- if c == nil {
- return Node(0)
- }
- return c.node
-}
-
// CIDDescriptor contains container ID and information about the node position
// in the list of container nodes.
type CIDDescriptor struct {
@@ -126,68 +85,3 @@ var ErrInvalidCIDDescriptor = logicerr.New("cid descriptor is invalid")
func (d CIDDescriptor) checkValid() bool {
return 0 <= d.Position && d.Position < d.Size
}
-
-var treeListTreesBatchSizeDefault = 1000
-
-type ContainerIDTreeID struct {
- CID cidSDK.ID
- TreeID string
-}
-
-type TreeListTreesPrm struct {
- NextPageToken []byte
- // BatchSize is batch size to list trees. If not lower or equals zero, than treeListTreesBatchSizeDefault is used.
- BatchSize int
-}
-
-type TreeListTreesResult struct {
- NextPageToken []byte
- Items []ContainerIDTreeID
-}
-
-type treeList interface {
- TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*TreeListTreesResult, error)
-}
-
-func TreeListAll(ctx context.Context, f treeList) ([]ContainerIDTreeID, error) {
- return treeListAll(ctx, f, treeListTreesBatchSizeDefault)
-}
-
-func treeListAll(ctx context.Context, f treeList, batchSize int) ([]ContainerIDTreeID, error) {
- var prm TreeListTreesPrm
- prm.BatchSize = batchSize
- var result []ContainerIDTreeID
- first := true
-
- for len(prm.NextPageToken) > 0 || first {
- first = false
-
- res, err := f.TreeListTrees(ctx, prm)
- if err != nil {
- return nil, err
- }
- prm.NextPageToken = res.NextPageToken
- result = append(result, res.Items...)
- }
-
- return result, nil
-}
-
-func TreeCountAll(ctx context.Context, f treeList) (uint64, error) {
- var prm TreeListTreesPrm
- var result uint64
- first := true
-
- for len(prm.NextPageToken) > 0 || first {
- first = false
-
- res, err := f.TreeListTrees(ctx, prm)
- if err != nil {
- return 0, err
- }
- prm.NextPageToken = res.NextPageToken
- result += uint64(len(res.Items))
- }
-
- return result, nil
-}
diff --git a/pkg/local_object_storage/pilorama/meta.go b/pkg/local_object_storage/pilorama/meta.go
index 45e9c2f79..49b7f6477 100644
--- a/pkg/local_object_storage/pilorama/meta.go
+++ b/pkg/local_object_storage/pilorama/meta.go
@@ -21,11 +21,7 @@ func (x Meta) Bytes() []byte {
}
func (x Meta) GetAttr(name string) []byte {
- return findAttr(x.Items, name)
-}
-
-func findAttr(ms []KeyValue, name string) []byte {
- for _, kv := range ms {
+ for _, kv := range x.Items {
if kv.Key == name {
return kv.Value
}
diff --git a/pkg/local_object_storage/pilorama/meta_test.go b/pkg/local_object_storage/pilorama/meta_test.go
index f329f6092..9df4c7e94 100644
--- a/pkg/local_object_storage/pilorama/meta_test.go
+++ b/pkg/local_object_storage/pilorama/meta_test.go
@@ -21,8 +21,7 @@ func TestMeta_Bytes(t *testing.T) {
Items: []KeyValue{
{"abc", []byte{1, 2, 3}},
{AttributeFilename, []byte{}},
- },
- }
+ }}
data := expected.Bytes()
@@ -36,8 +35,7 @@ func TestMeta_Bytes(t *testing.T) {
Items: []KeyValue{
{"abc", []byte{1, 2, 3}},
{"xyz", []byte{5, 6, 7, 8}},
- },
- }
+ }}
data := expected.Bytes()
diff --git a/pkg/local_object_storage/pilorama/metrics.go b/pkg/local_object_storage/pilorama/metrics.go
index 6ffc479e4..543ad3e31 100644
--- a/pkg/local_object_storage/pilorama/metrics.go
+++ b/pkg/local_object_storage/pilorama/metrics.go
@@ -9,7 +9,7 @@ import (
type Metrics interface {
SetParentID(id string)
- SetMode(m mode.ComponentMode)
+ SetMode(m mode.Mode)
Close()
AddMethodDuration(method string, d time.Duration, success bool)
@@ -18,6 +18,6 @@ type Metrics interface {
type noopMetrics struct{}
func (m *noopMetrics) SetParentID(string) {}
-func (m *noopMetrics) SetMode(mode.ComponentMode) {}
+func (m *noopMetrics) SetMode(mode.Mode) {}
func (m *noopMetrics) Close() {}
func (m *noopMetrics) AddMethodDuration(string, time.Duration, bool) {}
diff --git a/pkg/local_object_storage/pilorama/mode_test.go b/pkg/local_object_storage/pilorama/mode_test.go
deleted file mode 100644
index 0c042aa56..000000000
--- a/pkg/local_object_storage/pilorama/mode_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package pilorama
-
-import (
- "context"
- "path/filepath"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "github.com/stretchr/testify/require"
-)
-
-func Test_Mode(t *testing.T) {
- t.Parallel()
- f := NewBoltForest(
- []Option{
- WithPath(filepath.Join(t.TempDir(), "test.db")),
- WithMaxBatchSize(1),
- }...)
-
- require.NoError(t, f.Open(context.Background(), mode.DegradedReadOnly))
- require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init(context.Background()))
- require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close(context.Background()))
-
- require.NoError(t, f.Open(context.Background(), mode.Degraded))
- require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init(context.Background()))
- require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close(context.Background()))
-}
diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go
deleted file mode 100644
index 36d347f10..000000000
--- a/pkg/local_object_storage/pilorama/multinode.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package pilorama
-
-import "bytes"
-
-// MultiNode represents a group of internal nodes accessible by the same path, but having different id.
-type MultiNode []Node
-
-// MultiNodeInfo represents a group of internal nodes accessible by the same path, but having different id.
-type MultiNodeInfo struct {
- Children MultiNode
- Parents MultiNode
- Timestamps []uint64
- Meta []KeyValue
-}
-
-func (r *MultiNodeInfo) Add(info NodeInfo) bool {
- if !isInternal(info.Meta.Items) || !isInternal(r.Meta) ||
- !bytes.Equal(r.Meta[0].Value, info.Meta.Items[0].Value) {
- return false
- }
-
- r.Children = append(r.Children, info.ID)
- r.Parents = append(r.Parents, info.ParentID)
- r.Timestamps = append(r.Timestamps, info.Meta.Time)
- return true
-}
-
-func (r *MultiNodeInfo) LastChild() Node {
- return r.Children[len(r.Children)-1]
-}
-
-func (n NodeInfo) ToMultiNode() MultiNodeInfo {
- return MultiNodeInfo{
- Children: MultiNode{n.ID},
- Parents: MultiNode{n.ParentID},
- Timestamps: []uint64{n.Meta.Time},
- Meta: n.Meta.Items,
- }
-}
-
-func isInternal(m []KeyValue) bool {
- return len(m) == 1 && m[0].Key == AttributeFilename
-}
-
-func mergeNodeInfos(ns []NodeInfo) []MultiNodeInfo {
- var r []MultiNodeInfo
- for _, info := range ns {
- if len(r) == 0 || !r[len(r)-1].Add(info) {
- r = append(r, info.ToMultiNode())
- }
- }
- return r
-}
diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go
deleted file mode 100644
index eecee1527..000000000
--- a/pkg/local_object_storage/pilorama/split_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package pilorama
-
-import (
- "context"
- "strings"
- "testing"
-
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestDuplicateDirectory(t *testing.T) {
- for i := range providers {
- if providers[i].name == "inmemory" {
- continue
- }
- t.Run(providers[i].name, func(t *testing.T) {
- testDuplicateDirectory(t, providers[i].construct(t))
- })
- }
-}
-
-func testDuplicateDirectory(t *testing.T, f Forest) {
- ctx := context.Background()
- d := CIDDescriptor{CID: cidtest.ID(), Size: 1}
- treeID := "sometree"
-
- treeApply := func(t *testing.T, parent, child uint64, filename string, internal bool) {
- // Nothing magic here, we add items in order and children are unique.
- // This simplifies function interface a bit.
- ts := child
-
- kv := []KeyValue{{Key: AttributeFilename, Value: []byte(filename)}}
- if !internal {
- kv = append(kv, KeyValue{Key: "uniqueAttr", Value: []byte{byte(child)}})
- }
-
- err := f.TreeApply(ctx, d.CID, treeID, &Move{
- Parent: parent,
- Child: child,
- Meta: Meta{
- Time: ts,
- Items: kv,
- },
- }, true)
- require.NoError(t, err)
- }
-
- // The following tree is constructed:
- // 0
- // [1] |-- dir1 (internal)
- // [2] |-- value1
- // [3] |-- dir3 (internal)
- // [4] |-- value3
- // [5] |-- dir1 (internal)
- // [6] |-- value2
- // [7] |-- dir3 (internal)
- // [8] |-- value4
- // [9] |-- dir2 (internal)
- // [10] |-- value0
- treeApply(t, RootID, 1, "dir1", true)
- treeApply(t, 1, 2, "value1", false)
- treeApply(t, 1, 3, "dir3", true)
- treeApply(t, 3, 4, "value3", false)
- treeApply(t, RootID, 5, "dir1", true)
- treeApply(t, 5, 6, "value2", false)
- treeApply(t, 5, 7, "dir3", true)
- treeApply(t, 7, 8, "value4", false)
- treeApply(t, RootID, 9, "dir2", true)
- treeApply(t, RootID, 10, "value0", false)
-
- // The compacted view:
- // 0
- // [1,5] |-- dir1 (internal)
- // [2] |-- value1
- // [3,7] |-- dir3 (internal)
- // [4] |-- value3
- // [8] |-- value4
- // [6] |-- value2
- // [9] |-- dir2 (internal)
- // [10] |-- value0
- testGetByPath := func(t *testing.T, p string) []byte {
- pp := strings.Split(p, "/")
- nodes, err := f.TreeGetByPath(context.Background(), d.CID, treeID, AttributeFilename, pp, false)
- require.NoError(t, err)
- require.Equal(t, 1, len(nodes))
-
- meta, _, err := f.TreeGetMeta(ctx, d.CID, treeID, nodes[0])
- require.NoError(t, err)
- require.Equal(t, []byte(pp[len(pp)-1]), meta.GetAttr(AttributeFilename))
- return meta.GetAttr("uniqueAttr")
- }
-
- require.Equal(t, []byte{2}, testGetByPath(t, "dir1/value1"))
- require.Equal(t, []byte{4}, testGetByPath(t, "dir1/dir3/value3"))
- require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4"))
- require.Equal(t, []byte{10}, testGetByPath(t, "value0"))
-
- testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) {
- res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize)
- require.NoError(t, err)
- return res, last
- }
-
- t.Run("test sorted listing, full children branch", func(t *testing.T) {
- t.Run("big batch size", func(t *testing.T) {
- res, _ := testSortedByFilename(t, MultiNode{RootID}, nil, 10)
- require.Equal(t, 3, len(res))
- require.Equal(t, MultiNode{1, 5}, res[0].Children)
- require.Equal(t, MultiNode{9}, res[1].Children)
- require.Equal(t, MultiNode{10}, res[2].Children)
-
- t.Run("multi-root", func(t *testing.T) {
- res, _ := testSortedByFilename(t, MultiNode{1, 5}, nil, 10)
- require.Equal(t, 3, len(res))
- require.Equal(t, MultiNode{3, 7}, res[0].Children)
- require.Equal(t, MultiNode{2}, res[1].Children)
- require.Equal(t, MultiNode{6}, res[2].Children)
- })
- })
- t.Run("small batch size", func(t *testing.T) {
- res, last := testSortedByFilename(t, MultiNode{RootID}, nil, 1)
- require.Equal(t, 1, len(res))
- require.Equal(t, MultiNode{1, 5}, res[0].Children)
-
- res, last = testSortedByFilename(t, MultiNode{RootID}, last, 1)
- require.Equal(t, 1, len(res))
- require.Equal(t, MultiNode{9}, res[0].Children)
-
- res, last = testSortedByFilename(t, MultiNode{RootID}, last, 1)
- require.Equal(t, 1, len(res))
- require.Equal(t, MultiNode{10}, res[0].Children)
-
- res, _ = testSortedByFilename(t, MultiNode{RootID}, last, 1)
- require.Equal(t, 0, len(res))
-
- t.Run("multi-root", func(t *testing.T) {
- res, last := testSortedByFilename(t, MultiNode{1, 5}, nil, 1)
- require.Equal(t, 1, len(res))
- require.Equal(t, MultiNode{3, 7}, res[0].Children)
-
- res, last = testSortedByFilename(t, MultiNode{1, 5}, last, 1)
- require.Equal(t, 1, len(res))
- require.Equal(t, MultiNode{2}, res[0].Children)
-
- res, last = testSortedByFilename(t, MultiNode{1, 5}, last, 1)
- require.Equal(t, 1, len(res))
- require.Equal(t, MultiNode{6}, res[0].Children)
-
- res, _ = testSortedByFilename(t, MultiNode{RootID}, last, 1)
- require.Equal(t, 0, len(res))
- })
- })
- })
-}
diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go
index b4015ae8d..24090e8d8 100644
--- a/pkg/local_object_storage/shard/container.go
+++ b/pkg/local_object_storage/shard/container.go
@@ -1,13 +1,9 @@
package shard
import (
- "context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
)
type ContainerSizePrm struct {
@@ -26,7 +22,7 @@ func (r ContainerSizeRes) Size() uint64 {
return r.size
}
-func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
+func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
s.m.RLock()
defer s.m.RUnlock()
@@ -34,117 +30,12 @@ func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (Contai
return ContainerSizeRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ContainerSizeRes{}, err
- }
- defer release()
-
size, err := s.metaBase.ContainerSize(prm.cnr)
if err != nil {
- return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err)
+ return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err)
}
return ContainerSizeRes{
size: size,
}, nil
}
-
-type ContainerCountPrm struct {
- ContainerID cid.ID
-}
-
-type ContainerCountRes struct {
- Phy uint64
- Logic uint64
- User uint64
-}
-
-func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (ContainerCountRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ContainerCount",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.Stringer("container_id", prm.ContainerID),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return ContainerCountRes{}, ErrDegradedMode
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ContainerCountRes{}, err
- }
- defer release()
-
- counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID)
- if err != nil {
- return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err)
- }
-
- return ContainerCountRes{
- Phy: counters.Phy,
- Logic: counters.Logic,
- User: counters.User,
- }, nil
-}
-
-func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.DeleteContainerSize",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.Stringer("container_id", id),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.ReadOnly() {
- return ErrReadOnlyMode
- }
-
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- return s.metaBase.DeleteContainerSize(ctx, id)
-}
-
-func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.DeleteContainerCount",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.Stringer("container_id", id),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.ReadOnly() {
- return ErrReadOnlyMode
- }
-
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- return s.metaBase.DeleteContainerCount(ctx, id)
-}
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index d489b8b0d..257498b32 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -4,8 +4,6 @@ import (
"context"
"errors"
"fmt"
- "slices"
- "sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -17,28 +15,27 @@ import (
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
- "golang.org/x/sync/errgroup"
)
-func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err error) error {
- s.log.Error(ctx, logs.ShardMetabaseFailureSwitchingMode,
+func (s *Shard) handleMetabaseFailure(stage string, err error) error {
+ s.log.Error(logs.ShardMetabaseFailureSwitchingMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.ReadOnly),
zap.Error(err))
- err = s.SetMode(ctx, mode.ReadOnly)
+ err = s.SetMode(mode.ReadOnly)
if err == nil {
return nil
}
- s.log.Error(ctx, logs.ShardCantMoveShardToReadonlySwitchMode,
+ s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.DegradedReadOnly),
zap.Error(err))
- err = s.SetMode(ctx, mode.DegradedReadOnly)
+ err = s.SetMode(mode.DegradedReadOnly)
if err != nil {
- return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly)
+ return fmt.Errorf("could not switch to mode %s", mode.DegradedReadOnly)
}
return nil
}
@@ -46,17 +43,12 @@ func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err err
// Open opens all Shard's components.
func (s *Shard) Open(ctx context.Context) error {
components := []interface {
- Open(context.Context, mode.Mode) error
+ Open(context.Context, bool) error
}{
- s.blobStor,
- }
- m := s.GetMode()
-
- if !m.NoMetabase() {
- components = append(components, s.metaBase)
+ s.blobStor, s.metaBase,
}
- if s.hasWriteCache() && !m.NoMetabase() {
+ if s.hasWriteCache() {
components = append(components, s.writeCache)
}
@@ -65,17 +57,17 @@ func (s *Shard) Open(ctx context.Context) error {
}
for i, component := range components {
- if err := component.Open(ctx, m); err != nil {
+ if err := component.Open(ctx, false); err != nil {
if component == s.metaBase {
// We must first open all other components to avoid
// opening non-existent DB in read-only mode.
for j := i + 1; j < len(components); j++ {
- if err := components[j].Open(ctx, m); err != nil {
+ if err := components[j].Open(ctx, false); err != nil {
// Other components must be opened, fail.
- return fmt.Errorf("open %T: %w", components[j], err)
+ return fmt.Errorf("could not open %T: %w", components[j], err)
}
}
- err = s.handleMetabaseFailure(ctx, "open", err)
+ err = s.handleMetabaseFailure("open", err)
if err != nil {
return err
}
@@ -83,7 +75,7 @@ func (s *Shard) Open(ctx context.Context) error {
break
}
- return fmt.Errorf("open %T: %w", component, err)
+ return fmt.Errorf("could not open %T: %w", component, err)
}
}
return nil
@@ -91,8 +83,8 @@ func (s *Shard) Open(ctx context.Context) error {
type metabaseSynchronizer Shard
-func (x *metabaseSynchronizer) Init(ctx context.Context) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "metabaseSynchronizer.Init")
+func (x *metabaseSynchronizer) Init() error {
+ ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init")
defer span.End()
return (*Shard)(x).refillMetabase(ctx)
@@ -100,50 +92,13 @@ func (x *metabaseSynchronizer) Init(ctx context.Context) error {
// Init initializes all Shard's components.
func (s *Shard) Init(ctx context.Context) error {
- m := s.GetMode()
- if err := s.initializeComponents(ctx, m); err != nil {
- return err
- }
-
- s.updateMetrics(ctx)
-
- s.gc = &gc{
- gcCfg: &s.gcCfg,
- remover: s.removeGarbage,
- stopChannel: make(chan struct{}),
- newEpochChan: make(chan uint64),
- newEpochHandlers: &newEpochHandlers{
- cancelFunc: func() {},
- handlers: []newEpochHandler{
- s.collectExpiredLocks,
- s.collectExpiredObjects,
- s.collectExpiredTombstones,
- s.collectExpiredMetrics,
- },
- },
- }
- if s.gc.metrics != nil {
- s.gc.metrics.SetShardID(s.info.ID.String())
- }
-
- s.gc.init(ctx)
-
- s.rb = newRebuilder()
- if !m.NoMetabase() {
- s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
- }
- s.writecacheSealCancel.Store(dummyCancel)
- return nil
-}
-
-func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
type initializer interface {
- Init(context.Context) error
+ Init() error
}
var components []initializer
- if !m.NoMetabase() {
+ if !s.GetMode().NoMetabase() {
var initMetabase initializer
if s.NeedRefillMetabase() {
@@ -159,7 +114,7 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
components = []initializer{s.blobStor}
}
- if s.hasWriteCache() && !m.NoMetabase() {
+ if s.hasWriteCache() {
components = append(components, s.writeCache)
}
@@ -168,13 +123,13 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
}
for _, component := range components {
- if err := component.Init(ctx); err != nil {
+ if err := component.Init(); err != nil {
if component == s.metaBase {
- if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) {
+ if errors.Is(err, meta.ErrOutdatedVersion) {
return fmt.Errorf("metabase initialization: %w", err)
}
- err = s.handleMetabaseFailure(ctx, "init", err)
+ err = s.handleMetabaseFailure("init", err)
if err != nil {
return err
}
@@ -182,141 +137,89 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
break
}
- return fmt.Errorf("initialize %T: %w", component, err)
+ return fmt.Errorf("could not initialize %T: %w", component, err)
}
}
+
+ s.updateMetrics(ctx)
+
+ s.gc = &gc{
+ gcCfg: &s.gcCfg,
+ remover: s.removeGarbage,
+ stopChannel: make(chan struct{}),
+ eventChan: make(chan Event),
+ mEventHandler: map[eventType]*eventHandlers{
+ eventNewEpoch: {
+ cancelFunc: func() {},
+ handlers: []eventHandler{
+ s.collectExpiredLocks,
+ s.collectExpiredObjects,
+ s.collectExpiredTombstones,
+ },
+ },
+ },
+ }
+
+ s.gc.init(ctx)
+
return nil
}
func (s *Shard) refillMetabase(ctx context.Context) error {
- path := s.metaBase.DumpInfo().Path
- s.metricsWriter.SetRefillStatus(path, "running")
- s.metricsWriter.SetRefillPercent(path, 0)
- var success bool
- defer func() {
- if success {
- s.metricsWriter.SetRefillStatus(path, "completed")
- } else {
- s.metricsWriter.SetRefillStatus(path, "failed")
- }
- }()
-
err := s.metaBase.Reset()
if err != nil {
- return fmt.Errorf("reset metabase: %w", err)
+ return fmt.Errorf("could not reset metabase: %w", err)
}
- withCount := true
- totalObjects, err := s.blobStor.ObjectsCount(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
- withCount = false
- }
+ obj := objectSDK.New()
- eg, egCtx := errgroup.WithContext(ctx)
- if s.refillMetabaseWorkersCount > 0 {
- eg.SetLimit(s.refillMetabaseWorkersCount)
- }
-
- var completedCount uint64
- var metricGuard sync.Mutex
- itErr := blobstor.IterateBinaryObjects(egCtx, s.blobStor, func(addr oid.Address, data []byte, descriptor []byte) error {
- eg.Go(func() error {
- var success bool
- defer func() {
- s.metricsWriter.IncRefillObjectsCount(path, len(data), success)
- if withCount {
- metricGuard.Lock()
- completedCount++
- s.metricsWriter.SetRefillPercent(path, uint32(completedCount*100/totalObjects))
- metricGuard.Unlock()
- }
- }()
-
- if err := s.refillObject(egCtx, data, addr, descriptor); err != nil {
- return err
- }
- success = true
- return nil
- })
-
- select {
- case <-egCtx.Done():
- return egCtx.Err()
- default:
+ err = blobstor.IterateBinaryObjects(ctx, s.blobStor, func(addr oid.Address, data []byte, descriptor []byte) error {
+ if err := obj.Unmarshal(data); err != nil {
+ s.log.Warn(logs.ShardCouldNotUnmarshalObject,
+ zap.Stringer("address", addr),
+ zap.String("err", err.Error()))
return nil
}
+
+ var err error
+ switch obj.Type() {
+ case objectSDK.TypeTombstone:
+ err = s.refillTombstoneObject(ctx, obj)
+ case objectSDK.TypeLock:
+ err = s.refillLockObject(ctx, obj)
+ default:
+ }
+ if err != nil {
+ return err
+ }
+
+ var mPrm meta.PutPrm
+ mPrm.SetObject(obj)
+ mPrm.SetStorageID(descriptor)
+
+ _, err = s.metaBase.Put(ctx, mPrm)
+ if err != nil && !client.IsErrObjectAlreadyRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
+ return err
+ }
+
+ return nil
})
-
- egErr := eg.Wait()
-
- err = errors.Join(egErr, itErr)
if err != nil {
- return fmt.Errorf("put objects to the meta: %w", err)
+ return fmt.Errorf("could not put objects to the meta: %w", err)
}
err = s.metaBase.SyncCounters()
if err != nil {
- return fmt.Errorf("sync object counters: %w", err)
+ return fmt.Errorf("could not sync object counters: %w", err)
}
- success = true
- s.metricsWriter.SetRefillPercent(path, 100)
- return nil
-}
-
-func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error {
- obj := objectSDK.New()
- if err := obj.Unmarshal(data); err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
- zap.Stringer("address", addr),
- zap.Error(err))
- return nil
- }
-
- hasIndexedAttribute := slices.IndexFunc(obj.Attributes(), func(attr objectSDK.Attribute) bool { return meta.IsAtrributeIndexed(attr.Key()) }) > 0
-
- var isIndexedContainer bool
- if hasIndexedAttribute {
- info, err := s.containerInfo.Info(ctx, addr.Container())
- if err != nil {
- return err
- }
- if info.Removed {
- s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
- return nil
- }
- isIndexedContainer = info.Indexed
- }
-
- var err error
- switch obj.Type() {
- case objectSDK.TypeTombstone:
- err = s.refillTombstoneObject(ctx, obj)
- case objectSDK.TypeLock:
- err = s.refillLockObject(ctx, obj)
- default:
- }
- if err != nil {
- return err
- }
-
- var mPrm meta.PutPrm
- mPrm.SetObject(obj)
- mPrm.SetStorageID(descriptor)
- mPrm.SetIndexAttributes(hasIndexedAttribute && isIndexedContainer)
-
- _, err = s.metaBase.Put(ctx, mPrm)
- if err != nil && !client.IsErrObjectAlreadyRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
- return err
- }
return nil
}
func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
var lock objectSDK.Lock
if err := lock.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("unmarshal lock content: %w", err)
+ return fmt.Errorf("could not unmarshal lock content: %w", err)
}
locked := make([]oid.ID, lock.NumberOfMembers())
@@ -326,7 +229,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err
id, _ := obj.ID()
err := s.metaBase.Lock(ctx, cnr, id, locked)
if err != nil {
- return fmt.Errorf("lock objects: %w", err)
+ return fmt.Errorf("could not lock objects: %w", err)
}
return nil
}
@@ -335,7 +238,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
tombstone := objectSDK.NewTombstone()
if err := tombstone.Unmarshal(obj.Payload()); err != nil {
- return fmt.Errorf("unmarshal tombstone content: %w", err)
+ return fmt.Errorf("could not unmarshal tombstone content: %w", err)
}
tombAddr := object.AddressOf(obj)
@@ -356,26 +259,20 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
_, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
- return fmt.Errorf("inhume objects: %w", err)
+ return fmt.Errorf("could not inhume objects: %w", err)
}
return nil
}
// Close releases all Shard's components.
-func (s *Shard) Close(ctx context.Context) error {
- unlock := s.lockExclusive()
- if s.rb != nil {
- s.rb.Stop(ctx, s.log)
- }
- var components []interface{ Close(context.Context) error }
+func (s *Shard) Close() error {
+ components := []interface{ Close() error }{}
if s.pilorama != nil {
components = append(components, s.pilorama)
}
if s.hasWriteCache() {
- prev := s.writecacheSealCancel.Swap(notInitializedCancel)
- prev.cancel() // no need to wait: writecache.Seal and writecache.Close lock the same mutex
components = append(components, s.writeCache)
}
@@ -383,23 +280,15 @@ func (s *Shard) Close(ctx context.Context) error {
var lastErr error
for _, component := range components {
- if err := component.Close(ctx); err != nil {
+ if err := component.Close(); err != nil {
lastErr = err
- s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err))
+ s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err))
}
}
- if s.opsLimiter != nil {
- s.opsLimiter.Close()
- }
-
- unlock()
-
- // GC waits for handlers and remover to complete. Handlers may try to lock shard's lock.
- // So to prevent deadlock GC stopping is outside of exclusive lock.
// If Init/Open was unsuccessful gc can be nil.
if s.gc != nil {
- s.gc.stop(ctx)
+ s.gc.stop()
}
return lastErr
@@ -421,18 +310,11 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
unlock := s.lockExclusive()
defer unlock()
- s.rb.Stop(ctx, s.log)
- if !s.info.Mode.NoMetabase() {
- defer func() {
- s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
- }()
- }
-
- ok, err := s.metaBase.Reload(ctx, c.metaOpts...)
+ ok, err := s.metaBase.Reload(c.metaOpts...)
if err != nil {
if errors.Is(err, meta.ErrDegradedMode) {
- s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
- _ = s.setMode(ctx, mode.DegradedReadOnly)
+ s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
+ _ = s.setMode(mode.DegradedReadOnly)
}
return err
}
@@ -444,19 +326,17 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
// config after the node was updated.
err = s.refillMetabase(ctx)
} else {
- err = s.metaBase.Init(ctx)
+ err = s.metaBase.Init()
}
if err != nil {
- s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
- _ = s.setMode(ctx, mode.DegradedReadOnly)
+ s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
+ _ = s.setMode(mode.DegradedReadOnly)
return err
}
}
- if c.opsLimiter != nil {
- s.opsLimiter.Close()
- s.opsLimiter = c.opsLimiter
- }
- return s.setMode(ctx, c.info.Mode)
+
+ s.log.Info(logs.ShardTryingToRestoreReadwriteMode)
+ return s.setMode(mode.ReadWrite)
}
func (s *Shard) lockExclusive() func() {
@@ -466,9 +346,6 @@ func (s *Shard) lockExclusive() func() {
cancelGC := val.(context.CancelFunc)
cancelGC()
}
- if c := s.writecacheSealCancel.Load(); c != nil {
- c.cancel()
- }
s.m.Lock()
s.setModeRequested.Store(false)
return s.m.Unlock
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index 6d2cd7137..749229cc3 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -2,7 +2,6 @@ package shard
import (
"context"
- "fmt"
"io/fs"
"math"
"os"
@@ -12,12 +11,14 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -57,14 +58,17 @@ func TestShardOpen(t *testing.T) {
return nil, fs.ErrPermission
}
- wcOpts := []writecache.Option{
- writecache.WithPath(filepath.Join(dir, "wc")),
+ wcOpts := writecacheconfig.Options{
+ Type: writecacheconfig.TypeBBolt,
+ BBoltOptions: []writecachebbolt.Option{
+ writecachebbolt.WithPath(filepath.Join(dir, "wc")),
+ },
}
newShard := func() *Shard {
return New(
WithID(NewIDFromBytes([]byte{})),
- WithLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t, true)),
WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
{Storage: st},
@@ -86,7 +90,7 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadWrite, sh.GetMode())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
// Metabase can be opened in read-only => start in ReadOnly mode.
allowedMode.Store(int64(os.O_RDONLY))
@@ -95,9 +99,9 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.Error(t, sh.SetMode(context.Background(), mode.ReadWrite))
+ require.Error(t, sh.SetMode(mode.ReadWrite))
require.Equal(t, mode.ReadOnly, sh.GetMode())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
// Metabase is corrupted => start in DegradedReadOnly mode.
allowedMode.Store(math.MaxInt64)
@@ -106,7 +110,7 @@ func TestShardOpen(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
require.Equal(t, mode.DegradedReadOnly, sh.GetMode())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
}
func TestRefillMetabaseCorrupted(t *testing.T) {
@@ -126,15 +130,11 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
}),
}
- mm := newMetricStore()
-
sh := New(
WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))),
- WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(epochState{})),
- WithMetricsWriter(mm),
- )
+ WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(epochState{})))
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
@@ -146,23 +146,18 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
putPrm.SetObject(obj)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
addr := object.AddressOf(obj)
- // This is copied from `fstree.treePath()` to avoid exporting function just for tests.
- {
- saddr := addr.Object().EncodeToString() + "." + addr.Container().EncodeToString()
- p := fmt.Sprintf("%s/%s/%s", fsTree.RootPath, saddr[:2], saddr[2:])
- require.NoError(t, os.WriteFile(p, []byte("not an object"), fsTree.Permissions))
- }
+ _, err = fsTree.Put(context.Background(), common.PutPrm{Address: addr, RawData: []byte("not an object")})
+ require.NoError(t, err)
sh = New(
WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))),
WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta_new")), meta.WithEpochState(epochState{})),
- WithRefillMetabase(true),
- WithMetricsWriter(mm))
+ WithRefillMetabase(true))
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
@@ -170,7 +165,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
getPrm.SetAddress(addr)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err))
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
}
func TestRefillMetabase(t *testing.T) {
@@ -190,8 +185,6 @@ func TestRefillMetabase(t *testing.T) {
}),
}
- mm := newMetricStore()
-
sh := New(
WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
@@ -201,7 +194,6 @@ func TestRefillMetabase(t *testing.T) {
),
WithPiloramaOptions(
pilorama.WithPath(filepath.Join(p, "pilorama"))),
- WithMetricsWriter(mm),
)
// open Blobstor
@@ -216,7 +208,7 @@ func TestRefillMetabase(t *testing.T) {
locked := make([]oid.ID, 1, 2)
locked[0] = oidtest.ID()
cnrLocked := cidtest.ID()
- for range objNum {
+ for i := uint64(0); i < objNum; i++ {
obj := objecttest.Object()
obj.SetType(objectSDK.TypeRegular)
@@ -355,10 +347,10 @@ func TestRefillMetabase(t *testing.T) {
c, err := sh.metaBase.ObjectCounters()
require.NoError(t, err)
- phyBefore := c.Phy
- logicalBefore := c.Logic
+ phyBefore := c.Phy()
+ logicalBefore := c.Logic()
- err = sh.Close(context.Background())
+ err = sh.Close()
require.NoError(t, err)
sh = New(
@@ -370,7 +362,6 @@ func TestRefillMetabase(t *testing.T) {
),
WithPiloramaOptions(
pilorama.WithPath(filepath.Join(p, "pilorama_another"))),
- WithMetricsWriter(mm),
)
// open Blobstor
@@ -379,7 +370,7 @@ func TestRefillMetabase(t *testing.T) {
// initialize Blobstor
require.NoError(t, sh.Init(context.Background()))
- defer sh.Close(context.Background())
+ defer sh.Close()
checkAllObjs(false)
checkObj(object.AddressOf(tombObj), nil)
@@ -391,14 +382,11 @@ func TestRefillMetabase(t *testing.T) {
c, err = sh.metaBase.ObjectCounters()
require.NoError(t, err)
- require.Equal(t, phyBefore, c.Phy)
- require.Equal(t, logicalBefore, c.Logic)
+ require.Equal(t, phyBefore, c.Phy())
+ require.Equal(t, logicalBefore, c.Logic())
checkAllObjs(true)
checkObj(object.AddressOf(tombObj), tombObj)
checkTombMembers(true)
checkLocked(t, cnrLocked, locked)
- require.Equal(t, int64(len(mObjs)+2), mm.refillCount) // 1 lock + 1 tomb
- require.Equal(t, "completed", mm.refillStatus)
- require.Equal(t, uint32(100), mm.refillPercent)
}
diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go
index 8dc1f0522..abed5278e 100644
--- a/pkg/local_object_storage/shard/count.go
+++ b/pkg/local_object_storage/shard/count.go
@@ -23,15 +23,9 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) {
return 0, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
-
cc, err := s.metaBase.ObjectCounters()
if err != nil {
return 0, err
}
- return cc.Logic, nil
+ return cc.Logic(), nil
}
diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go
index 0101817a8..ea481300b 100644
--- a/pkg/local_object_storage/shard/delete.go
+++ b/pkg/local_object_storage/shard/delete.go
@@ -2,11 +2,13 @@ package shard
import (
"context"
- "fmt"
+ "errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -32,7 +34,8 @@ func (p *DeletePrm) SetAddresses(addr ...oid.Address) {
p.addr = append(p.addr, addr...)
}
-// Delete removes data from the shard's metaBase and// blobStor.
+// Delete removes data from the shard's writeCache, metaBase and
+// blobStor.
func (s *Shard) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Delete",
trace.WithAttributes(
@@ -44,22 +47,16 @@ func (s *Shard) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
s.m.RLock()
defer s.m.RUnlock()
- return s.delete(ctx, prm, false)
+ return s.delete(ctx, prm)
}
-func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (DeleteRes, error) {
+func (s *Shard) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
if s.info.Mode.ReadOnly() {
return DeleteRes{}, ErrReadOnlyMode
} else if s.info.Mode.NoMetabase() {
return DeleteRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return DeleteRes{}, err
- }
- defer release()
-
result := DeleteRes{}
for _, addr := range prm.addr {
select {
@@ -68,25 +65,12 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del
default:
}
- if err := s.validateWritecacheDoesntContainObject(ctx, addr); err != nil {
- if skipFailed {
- continue
- }
- return result, err
- }
+ s.deleteObjectFromWriteCacheSafe(ctx, addr)
- if err := s.deleteFromBlobstor(ctx, addr); err != nil {
- if skipFailed {
- continue
- }
- return result, err
- }
+ s.deleteFromBlobstorSafe(ctx, addr)
if err := s.deleteFromMetabase(ctx, addr); err != nil {
- if skipFailed {
- continue
- }
- return result, err
+ return result, err // stop on metabase error ?
}
result.deleted++
}
@@ -94,52 +78,39 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del
return result, nil
}
-func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr oid.Address) error {
- if !s.hasWriteCache() {
- return nil
+func (s *Shard) deleteObjectFromWriteCacheSafe(ctx context.Context, addr oid.Address) {
+ if s.hasWriteCache() {
+ err := s.writeCache.Delete(ctx, addr)
+ if err != nil && !client.IsErrObjectNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) {
+ s.log.Warn(logs.ShardCantDeleteObjectFromWriteCache, zap.Error(err))
+ }
}
- _, err := s.writeCache.Head(ctx, addr)
- if err == nil {
- s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
- return fmt.Errorf("object %s must be flushed from writecache", addr)
- }
- if client.IsErrObjectNotFound(err) {
- return nil
- }
- return err
}
-func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error {
+func (s *Shard) deleteFromBlobstorSafe(ctx context.Context, addr oid.Address) {
var sPrm meta.StorageIDPrm
sPrm.SetAddress(addr)
res, err := s.metaBase.StorageID(ctx, sPrm)
if err != nil {
- s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
+ s.log.Debug(logs.StorageIDRetrievalFailure,
zap.Stringer("object", addr),
- zap.Error(err))
- return err
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
storageID := res.StorageID()
- if storageID == nil {
- // if storageID is nil it means:
- // 1. there is no such object
- // 2. object stored by writecache: should not happen, as `validateWritecacheDoesntContainObject` called before `deleteFromBlobstor`
- return nil
- }
var delPrm common.DeletePrm
delPrm.Address = addr
delPrm.StorageID = storageID
_, err = s.blobStor.Delete(ctx, delPrm)
- if err != nil && !client.IsErrObjectNotFound(err) {
- s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
+ if err != nil {
+ s.log.Debug(logs.ObjectRemovalFailureBlobStor,
zap.Stringer("object_address", addr),
- zap.Error(err))
- return err
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
- return nil
}
func (s *Shard) deleteFromMetabase(ctx context.Context, addr oid.Address) error {
@@ -150,12 +121,14 @@ func (s *Shard) deleteFromMetabase(ctx context.Context, addr oid.Address) error
if err != nil {
return err
}
- s.decObjectCounterBy(physical, res.PhyCount())
- s.decObjectCounterBy(logical, res.LogicCount())
- s.decObjectCounterBy(user, res.UserCount())
- s.decContainerObjectCounter(res.RemovedByCnrID())
- s.addToContainerSize(addr.Container().EncodeToString(), -int64(res.LogicSize()))
- s.addToPayloadSize(-int64(res.PhySize()))
+ s.decObjectCounterBy(physical, res.RawObjectsRemoved())
+ s.decObjectCounterBy(logical, res.AvailableObjectsRemoved())
+ removedPayload := res.RemovedPhysicalObjectSizes()[0]
+ logicalRemovedPayload := res.RemovedLogicalObjectSizes()[0]
+ if logicalRemovedPayload > 0 {
+ s.addToContainerSize(addr.Container().EncodeToString(), -int64(logicalRemovedPayload))
+ }
+ s.addToPayloadSize(-int64(removedPayload))
return nil
}
diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go
index c9ce93bc5..3421ac9e0 100644
--- a/pkg/local_object_storage/shard/delete_test.go
+++ b/pkg/local_object_storage/shard/delete_test.go
@@ -3,6 +3,7 @@ package shard
import (
"context"
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
@@ -11,61 +12,78 @@ import (
"github.com/stretchr/testify/require"
)
-func TestShard_Delete_SmallObject(t *testing.T) {
- t.Run("small object without write cache", func(t *testing.T) {
+func TestShard_Delete(t *testing.T) {
+ t.Parallel()
+
+ t.Run("without write cache", func(t *testing.T) {
t.Parallel()
- testShard(t, false, 1<<5)
+ testShardDelete(t, false)
})
- t.Run("small object with write cache", func(t *testing.T) {
+ t.Run("with write cache", func(t *testing.T) {
t.Parallel()
- testShard(t, true, 1<<5)
+ testShardDelete(t, true)
})
}
-func TestShard_Delete_BigObject(t *testing.T) {
- t.Run("big object without write cache", func(t *testing.T) {
- t.Parallel()
- testShard(t, false, 1<<20)
- })
-
- t.Run("big object with write cache", func(t *testing.T) {
- t.Parallel()
- testShard(t, true, 1<<20)
- })
-}
-
-func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
+func testShardDelete(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(obj, "foo", "bar")
- testutil.AddPayload(obj, payloadSize)
var putPrm PutPrm
- putPrm.SetObject(obj)
-
var getPrm GetPrm
- getPrm.SetAddress(object.AddressOf(obj))
- var delPrm DeletePrm
- delPrm.SetAddresses(object.AddressOf(obj))
+ t.Run("big object", func(t *testing.T) {
+ testutil.AddPayload(obj, 1<<20)
- _, err := sh.Put(context.Background(), putPrm)
- require.NoError(t, err)
+ putPrm.SetObject(obj)
+ getPrm.SetAddress(object.AddressOf(obj))
- _, err = sh.Get(context.Background(), getPrm)
- require.NoError(t, err)
+ var delPrm DeletePrm
+ delPrm.SetAddresses(object.AddressOf(obj))
- if hasWriteCache {
- require.NoError(t, sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false}))
- }
- _, err = sh.Delete(context.Background(), delPrm)
- require.NoError(t, err)
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
- _, err = sh.Get(context.Background(), getPrm)
- require.True(t, client.IsErrObjectNotFound(err))
+ _, err = testGet(t, sh, getPrm, hasWriteCache)
+ require.NoError(t, err)
+
+ _, err = sh.Delete(context.TODO(), delPrm)
+ require.NoError(t, err)
+
+ require.Eventually(t, func() bool {
+ _, err = sh.Get(context.Background(), getPrm)
+ return client.IsErrObjectNotFound(err)
+ }, time.Second, 50*time.Millisecond)
+ })
+
+ t.Run("small object", func(t *testing.T) {
+ obj := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj, "foo", "bar")
+ testutil.AddPayload(obj, 1<<5)
+
+ putPrm.SetObject(obj)
+ getPrm.SetAddress(object.AddressOf(obj))
+
+ var delPrm DeletePrm
+ delPrm.SetAddresses(object.AddressOf(obj))
+
+ _, err := sh.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ _, err = sh.Get(context.Background(), getPrm)
+ require.NoError(t, err)
+
+ _, err = sh.Delete(context.Background(), delPrm)
+ require.NoError(t, err)
+
+ require.Eventually(t, func() bool {
+ _, err = sh.Get(context.Background(), getPrm)
+ return client.IsErrObjectNotFound(err)
+ }, time.Second, 50*time.Millisecond)
+ })
}
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index 2c11b6b01..2cdb8dfa8 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -5,9 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -15,16 +13,17 @@ import (
// ExistsPrm groups the parameters of Exists operation.
type ExistsPrm struct {
- // Exists option to set object checked for existence.
- Address oid.Address
- // Exists option to set parent object checked for existence.
- ECParentAddress oid.Address
+ addr oid.Address
}
// ExistsRes groups the resulting values of Exists operation.
type ExistsRes struct {
ex bool
- lc bool
+}
+
+// SetAddress is an Exists option to set object checked for existence.
+func (p *ExistsPrm) SetAddress(addr oid.Address) {
+ p.addr = addr
}
// Exists returns the fact that the object is in the shard.
@@ -32,11 +31,6 @@ func (p ExistsRes) Exists() bool {
return p.ex
}
-// Locked returns the fact that the object is locked.
-func (p ExistsRes) Locked() bool {
- return p.lc
-}
-
// Exists checks if object is presented in shard.
//
// Returns any error encountered that does not allow to
@@ -49,48 +43,35 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Exists",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
- attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
+ var exists bool
+ var err error
+
s.m.RLock()
defer s.m.RUnlock()
if s.info.Mode.Disabled() {
return ExistsRes{}, ErrShardDisabled
- } else if s.info.EvacuationInProgress {
- return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ExistsRes{}, err
- }
- defer release()
-
- var exists bool
- var locked bool
-
- if s.info.Mode.NoMetabase() {
+ } else if s.info.Mode.NoMetabase() {
var p common.ExistsPrm
- p.Address = prm.Address
+ p.Address = prm.addr
var res common.ExistsRes
res, err = s.blobStor.Exists(ctx, p)
exists = res.Exists
} else {
var existsPrm meta.ExistsPrm
- existsPrm.SetAddress(prm.Address)
- existsPrm.SetECParent(prm.ECParentAddress)
+ existsPrm.SetAddress(prm.addr)
var res meta.ExistsRes
res, err = s.metaBase.Exists(ctx, existsPrm)
exists = res.Exists()
- locked = res.Locked()
}
return ExistsRes{
ex: exists,
- lc: locked,
}, err
}
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index a262a52cb..13ab39ae0 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -6,13 +6,10 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@@ -33,14 +30,41 @@ type TombstoneSource interface {
IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool
}
-type newEpochHandler func(context.Context, uint64)
+// Event represents class of external events.
+type Event interface {
+ typ() eventType
+}
-type newEpochHandlers struct {
+type eventType int
+
+const (
+ _ eventType = iota
+ eventNewEpoch
+)
+
+type newEpoch struct {
+ epoch uint64
+}
+
+func (e newEpoch) typ() eventType {
+ return eventNewEpoch
+}
+
+// EventNewEpoch returns new epoch event.
+func EventNewEpoch(e uint64) Event {
+ return newEpoch{
+ epoch: e,
+ }
+}
+
+type eventHandler func(context.Context, Event)
+
+type eventHandlers struct {
prevGroup sync.WaitGroup
cancelFunc context.CancelFunc
- handlers []newEpochHandler
+ handlers []eventHandler
}
type gcRunResult struct {
@@ -56,7 +80,6 @@ const (
)
type GCMectrics interface {
- SetShardID(string)
AddRunDuration(d time.Duration, success bool)
AddDeletedCount(deleted, failed uint64)
AddExpiredObjectCollectionDuration(d time.Duration, success bool, objectType string)
@@ -65,7 +88,6 @@ type GCMectrics interface {
type noopGCMetrics struct{}
-func (m *noopGCMetrics) SetShardID(string) {}
func (m *noopGCMetrics) AddRunDuration(time.Duration, bool) {}
func (m *noopGCMetrics) AddDeletedCount(uint64, uint64) {}
func (m *noopGCMetrics) AddExpiredObjectCollectionDuration(time.Duration, bool, string) {}
@@ -82,10 +104,8 @@ type gc struct {
remover func(context.Context) gcRunResult
- // newEpochChan is used only for listening for the new epoch event.
- // It is ok to keep opened, we are listening for context done when writing in it.
- newEpochChan chan uint64
- newEpochHandlers *newEpochHandlers
+ eventChan chan Event
+ mEventHandler map[eventType]*eventHandlers
}
type gcCfg struct {
@@ -95,8 +115,8 @@ type gcCfg struct {
workerPoolInit func(int) util.WorkerPool
- expiredCollectorWorkerCount int
- expiredCollectorBatchSize int
+ expiredCollectorWorkersCount int
+ expiredCollectorBatchSize int
metrics GCMectrics
@@ -106,7 +126,7 @@ type gcCfg struct {
func defaultGCCfg() gcCfg {
return gcCfg{
removerInterval: 10 * time.Second,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
workerPoolInit: func(int) util.WorkerPool {
return nil
},
@@ -115,8 +135,16 @@ func defaultGCCfg() gcCfg {
}
func (gc *gc) init(ctx context.Context) {
- gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers))
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
+ sz := 0
+
+ for _, v := range gc.mEventHandler {
+ sz += len(v.handlers)
+ }
+
+ if sz > 0 {
+ gc.workerPool = gc.workerPoolInit(sz)
+ }
+
gc.wg.Add(2)
go gc.tickRemover(ctx)
go gc.listenEvents(ctx)
@@ -126,67 +154,47 @@ func (gc *gc) listenEvents(ctx context.Context) {
defer gc.wg.Done()
for {
- select {
- case <-gc.stopChannel:
- gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel)
+ event, ok := <-gc.eventChan
+ if !ok {
+ gc.log.Warn(logs.ShardStopEventListenerByClosedChannel)
return
- case <-ctx.Done():
- gc.log.Warn(ctx, logs.ShardStopEventListenerByContext)
- return
- case event, ok := <-gc.newEpochChan:
- if !ok {
- gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel)
- return
- }
-
- gc.handleEvent(ctx, event)
}
+
+ gc.handleEvent(ctx, event)
}
}
-func (gc *gc) handleEvent(ctx context.Context, epoch uint64) {
- gc.newEpochHandlers.cancelFunc()
- gc.newEpochHandlers.prevGroup.Wait()
+func (gc *gc) handleEvent(ctx context.Context, event Event) {
+ v, ok := gc.mEventHandler[event.typ()]
+ if !ok {
+ return
+ }
+
+ v.cancelFunc()
+ v.prevGroup.Wait()
var runCtx context.Context
- runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx)
+ runCtx, v.cancelFunc = context.WithCancel(ctx)
- gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers))
+ v.prevGroup.Add(len(v.handlers))
- for i := range gc.newEpochHandlers.handlers {
- select {
- case <-ctx.Done():
- return
- default:
- }
- h := gc.newEpochHandlers.handlers[i]
+ for i := range v.handlers {
+ h := v.handlers[i]
err := gc.workerPool.Submit(func() {
- defer gc.newEpochHandlers.prevGroup.Done()
- h(runCtx, epoch)
+ defer v.prevGroup.Done()
+ h(runCtx, event)
})
if err != nil {
- gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
- zap.Error(err),
+ gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool,
+ zap.String("error", err.Error()),
)
- gc.newEpochHandlers.prevGroup.Done()
+ v.prevGroup.Done()
}
}
}
-func (gc *gc) releaseResources(ctx context.Context) {
- if gc.workerPool != nil {
- gc.workerPool.Release()
- }
-
- // Avoid to close gc.eventChan here,
- // because it is possible that we are close it earlier than stop writing.
- // It is ok to keep it opened.
-
- gc.log.Debug(ctx, logs.ShardGCIsStopped)
-}
-
func (gc *gc) tickRemover(ctx context.Context) {
defer gc.wg.Done()
@@ -195,13 +203,14 @@ func (gc *gc) tickRemover(ctx context.Context) {
for {
select {
- case <-ctx.Done():
- // Context canceled earlier than we start to close shards.
- // It make sense to stop collecting garbage by context too.
- gc.releaseResources(ctx)
- return
case <-gc.stopChannel:
- gc.releaseResources(ctx)
+ if gc.workerPool != nil {
+ gc.workerPool.Release()
+ }
+
+ close(gc.eventChan)
+
+ gc.log.Debug(logs.ShardGCIsStopped)
return
case <-timer.C:
startedAt := time.Now()
@@ -220,16 +229,13 @@ func (gc *gc) tickRemover(ctx context.Context) {
}
}
-func (gc *gc) stop(ctx context.Context) {
+func (gc *gc) stop() {
gc.onceStop.Do(func() {
close(gc.stopChannel)
})
- gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop)
+ gc.log.Info(logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
-
- gc.newEpochHandlers.cancelFunc()
- gc.newEpochHandlers.prevGroup.Wait()
}
// iterates over metabase and deletes objects
@@ -251,47 +257,8 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
return
}
- s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted)
- defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted)
-
- buf, err := s.getGarbage(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
- zap.Error(err),
- )
-
- return
- } else if len(buf) == 0 {
- result.success = true
- return
- }
-
- var deletePrm DeletePrm
- deletePrm.SetAddresses(buf...)
-
- // delete accumulated objects
- res, err := s.delete(ctx, deletePrm, true)
-
- result.deleted = res.deleted
- result.failedToDelete = uint64(len(buf)) - res.deleted
- result.success = true
-
- if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
- zap.Error(err),
- )
- result.success = false
- }
-
- return
-}
-
-func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
+ s.log.Debug(logs.ShardGCRemoveGarbageStarted)
+ defer s.log.Debug(logs.ShardGCRemoveGarbageCompleted)
buf := make([]oid.Address, 0, s.rmBatchSize)
@@ -312,20 +279,55 @@ func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
return nil
})
- if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil {
- return nil, err
+ // iterate over metabase's objects with GC mark
+ // (no more than s.rmBatchSize objects)
+ err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
+ if err != nil {
+ s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed,
+ zap.String("error", err.Error()),
+ )
+
+ return
+ } else if len(buf) == 0 {
+ result.success = true
+ return
}
- return buf, nil
-}
+ var deletePrm DeletePrm
+ deletePrm.SetAddresses(buf...)
+
+ // delete accumulated objects
+ res, err := s.delete(ctx, deletePrm)
+
+ result.deleted = res.deleted
+ result.failedToDelete = uint64(len(buf)) - res.deleted
+ result.success = true
+
+ if err != nil {
+ s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
+ zap.String("error", err.Error()),
+ )
+ result.success = false
+ }
-func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
- workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount)
- batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize)
return
}
-func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
+func (s *Shard) getExpiredObjectsParameters() (workersCount, batchSize int) {
+ workersCount = minExpiredWorkers
+ batchSize = minExpiredBatchSize
+
+ if s.gc.gcCfg.expiredCollectorBatchSize > batchSize {
+ batchSize = s.gc.gcCfg.expiredCollectorBatchSize
+ }
+
+ if s.gc.gcCfg.expiredCollectorWorkersCount > workersCount {
+ workersCount = s.gc.gcCfg.expiredCollectorWorkersCount
+ }
+ return
+}
+
+func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@@ -333,8 +335,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular)
}()
- s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch))
+ s.log.Debug(logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
+ defer s.log.Debug(logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@@ -343,7 +345,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
- expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
+ expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock {
batch = append(batch, o.Address())
@@ -373,7 +375,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
})
if err = errGroup.Wait(); err != nil {
- s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err))
+ s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
}
}
@@ -391,32 +393,29 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
return
}
- s.handleExpiredObjectsUnsafe(ctx, expired)
-}
-
-func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) {
- select {
- case <-ctx.Done():
- return
- default:
- }
-
expired, err := s.getExpiredWithLinked(ctx, expired)
if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
+ s.log.Warn(logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
return
}
- res, err := s.inhumeGC(ctx, expired)
+ var inhumePrm meta.InhumePrm
+
+ inhumePrm.SetAddresses(expired...)
+ inhumePrm.SetGCMark()
+
+ // inhume the collected objects
+ res, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err))
+ s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
+ zap.String("error", err.Error()),
+ )
+
return
}
- s.gc.metrics.AddInhumedObjectCount(res.LogicInhumed(), objectTypeRegular)
- s.decObjectCounterBy(logical, res.LogicInhumed())
- s.decObjectCounterBy(user, res.UserInhumed())
- s.decContainerObjectCounter(res.InhumedByCnrID())
+ s.gc.metrics.AddInhumedObjectCount(res.AvailableInhumed(), objectTypeRegular)
+ s.decObjectCounterBy(logical, res.AvailableInhumed())
i := 0
for i < res.GetDeletionInfoLength() {
@@ -427,12 +426,6 @@ func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Ad
}
func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
-
result := make([]oid.Address, 0, len(source))
parentToChildren, err := s.metaBase.GetChildren(ctx, source)
if err != nil {
@@ -446,20 +439,7 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address)
return result, nil
}
-func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) {
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return meta.InhumeRes{}, err
- }
- defer release()
-
- var inhumePrm meta.InhumePrm
- inhumePrm.SetAddresses(addrs...)
- inhumePrm.SetGCMark()
- return s.metaBase.Inhume(ctx, inhumePrm)
-}
-
-func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
+func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@@ -467,10 +447,11 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone)
}()
+ epoch := e.(newEpoch).epoch
log := s.log.With(zap.Uint64("epoch", epoch))
- log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling)
- defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling)
+ log.Debug(logs.ShardStartedExpiredTombstonesHandling)
+ defer log.Debug(logs.ShardFinishedExpiredTombstonesHandling)
const tssDeleteBatch = 50
tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)
@@ -488,29 +469,22 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
})
for {
- log.Debug(ctx, logs.ShardIteratingTombstones)
+ log.Debug(logs.ShardIteratingTombstones)
s.m.RLock()
if s.info.Mode.NoMetabase() {
- s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
+ s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
s.m.RUnlock()
return
}
- var release qos.ReleaseFunc
- release, err = s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
- s.m.RUnlock()
- return
- }
err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
- release()
if err != nil {
- log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+ log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
+
return
}
@@ -527,10 +501,8 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
}
}
- log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
- if len(tssExp) > 0 {
- s.expiredTombstonesCallback(ctx, tssExp)
- }
+ log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
+ s.expiredTombstonesCallback(ctx, tssExp)
iterPrm.SetOffset(tss[tssLen-1].Address())
tss = tss[:0]
@@ -538,7 +510,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
}
}
-func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
+func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@@ -546,8 +518,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock)
}()
- s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch))
+ s.log.Debug(logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
+ defer s.log.Debug(logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@@ -557,14 +529,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
- expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
+ expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
if o.Type() == objectSDK.TypeLock {
batch = append(batch, o.Address())
if len(batch) == batchSize {
expired := batch
errGroup.Go(func() error {
- s.expiredLocksCallback(egCtx, epoch, expired)
+ s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
return egCtx.Err()
})
batch = make([]oid.Address, 0, batchSize)
@@ -578,7 +550,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
if len(batch) > 0 {
expired := batch
errGroup.Go(func() error {
- s.expiredLocksCallback(egCtx, epoch, expired)
+ s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
return egCtx.Err()
})
}
@@ -587,7 +559,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
})
if err = errGroup.Wait(); err != nil {
- s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err))
+ s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
}
}
@@ -599,13 +571,7 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
return ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
+ err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
select {
case <-ctx.Done():
return meta.ErrInterruptIterator
@@ -621,11 +587,12 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
}
func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
}
- defer release()
return s.metaBase.FilterExpired(ctx, epoch, addresses)
}
@@ -635,29 +602,33 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid
//
// Does not modify tss.
func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
+ if s.GetMode().NoMetabase() {
return
}
- release, err := s.opsLimiter.WriteRequest(ctx)
+ // Mark tombstones as garbage.
+ var pInhume meta.InhumePrm
+
+ tsAddrs := make([]oid.Address, 0, len(tss))
+ for _, ts := range tss {
+ tsAddrs = append(tsAddrs, ts.Tombstone())
+ }
+
+ pInhume.SetGCMark()
+ pInhume.SetAddresses(tsAddrs...)
+
+ // inhume tombstones
+ res, err := s.metaBase.Inhume(ctx, pInhume)
if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
- return
- }
- res, err := s.metaBase.InhumeTombstones(ctx, tss)
- release()
- if err != nil {
- s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
+ s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
+ zap.String("error", err.Error()),
+ )
+
return
}
- s.gc.metrics.AddInhumedObjectCount(res.LogicInhumed(), objectTypeTombstone)
- s.decObjectCounterBy(logical, res.LogicInhumed())
- s.decObjectCounterBy(user, res.UserInhumed())
- s.decContainerObjectCounter(res.InhumedByCnrID())
+ s.gc.metrics.AddInhumedObjectCount(res.AvailableInhumed(), objectTypeTombstone)
+ s.decObjectCounterBy(logical, res.AvailableInhumed())
i := 0
for i < res.GetDeletionInfoLength() {
@@ -665,27 +636,26 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size))
i++
}
+
+ // drop just processed expired tombstones
+ // from graveyard
+ err = s.metaBase.DropGraves(ctx, tss)
+ if err != nil {
+ s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
+ }
}
// HandleExpiredLocks unlocks all objects which were locked by lockers.
// If successful, marks lockers themselves as garbage.
func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return
- }
-
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ if s.GetMode().NoMetabase() {
return
}
unlocked, err := s.metaBase.FreeLockedBy(lockers)
- release()
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToUnlockObjects,
+ zap.String("error", err.Error()),
+ )
return
}
@@ -693,22 +663,18 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
var pInhume meta.InhumePrm
pInhume.SetAddresses(lockers...)
pInhume.SetForceGCMark()
- release, err = s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
- return
- }
+
res, err := s.metaBase.Inhume(ctx, pInhume)
- release()
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
+ zap.String("error", err.Error()),
+ )
+
return
}
- s.gc.metrics.AddInhumedObjectCount(res.LogicInhumed(), objectTypeLock)
- s.decObjectCounterBy(logical, res.LogicInhumed())
- s.decObjectCounterBy(user, res.UserInhumed())
- s.decContainerObjectCounter(res.InhumedByCnrID())
+ s.gc.metrics.AddInhumedObjectCount(res.AvailableInhumed(), objectTypeLock)
+ s.decObjectCounterBy(logical, res.AvailableInhumed())
i := 0
for i < res.GetDeletionInfoLength() {
@@ -723,7 +689,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
return
}
@@ -731,79 +697,26 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc
return
}
- s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked)
+ s.handleExpiredObjects(ctx, expiredUnlocked)
}
// HandleDeletedLocks unlocks all objects which were locked by lockers.
-func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
+func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
+ if s.GetMode().NoMetabase() {
return
}
- release, err := s.opsLimiter.WriteRequest(ctx)
+ _, err := s.metaBase.FreeLockedBy(lockers)
if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
- return
- }
- _, err = s.metaBase.FreeLockedBy(lockers)
- release()
- if err != nil {
- s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+ s.log.Warn(logs.ShardFailureToUnlockObjects,
+ zap.String("error", err.Error()),
+ )
+
return
}
}
-// NotificationChannel returns channel for new epoch events.
-func (s *Shard) NotificationChannel() chan<- uint64 {
- return s.gc.newEpochChan
-}
-
-func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) {
- ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics")
- defer span.End()
-
- s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
- defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
-
- s.collectExpiredContainerSizeMetrics(ctx, epoch)
- s.collectExpiredContainerCountMetrics(ctx, epoch)
-}
-
-func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
- return
- }
- ids, err := s.metaBase.ZeroSizeContainers(ctx)
- release()
- if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
- return
- }
- if len(ids) == 0 {
- return
- }
- s.zeroSizeContainersCallback(ctx, ids)
-}
-
-func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) {
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
- return
- }
- ids, err := s.metaBase.ZeroCountContainers(ctx)
- release()
- if err != nil {
- s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
- return
- }
- if len(ids) == 0 {
- return
- }
- s.zeroCountContainersCallback(ctx, ids)
+// NotificationChannel returns channel for shard events.
+func (s *Shard) NotificationChannel() chan<- Event {
+ return s.gc.eventChan
}
diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go
index 54d2f1510..332cdf5be 100644
--- a/pkg/local_object_storage/shard/gc_internal_test.go
+++ b/pkg/local_object_storage/shard/gc_internal_test.go
@@ -30,15 +30,13 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
rootPath := t.TempDir()
var sh *Shard
- l := test.NewLogger(t)
+ l := test.NewLogger(t, true)
blobOpts := []blobstor.Option{
- blobstor.WithLogger(test.NewLogger(t)),
+ blobstor.WithLogger(test.NewLogger(t, true)),
blobstor.WithStorages([]blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
- context.Background(),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithLogger(test.NewLogger(t, true)),
blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -62,8 +60,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
meta.WithEpochState(epochState{}),
),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))),
- WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(ctx, addresses)
+ WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
@@ -74,13 +72,16 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
return pool
}),
WithGCRemoverSleepInterval(1 * time.Second),
- WithDisabledGC(),
}
sh = New(opts...)
+ sh.gcCfg.testHookRemover = func(context.Context) gcRunResult { return gcRunResult{} }
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+
+ t.Cleanup(func() {
+ require.NoError(t, sh.Close())
+ })
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
@@ -100,7 +101,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
_, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err, "failed to get")
- // inhume
+ //inhume
var inhumePrm InhumePrm
inhumePrm.MarkAsGarbage(addr)
_, err = sh.Inhume(context.Background(), inhumePrm)
@@ -109,13 +110,13 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
require.Error(t, err, "get returned error")
require.True(t, client.IsErrObjectNotFound(err), "invalid error type")
- // storageID
+ //storageID
var metaStIDPrm meta.StorageIDPrm
metaStIDPrm.SetAddress(addr)
storageID, err := sh.metaBase.StorageID(context.Background(), metaStIDPrm)
require.NoError(t, err, "failed to get storage ID")
- // check existence in blobstore
+ //check existence in blobstore
var bsExisted common.ExistsPrm
bsExisted.Address = addr
bsExisted.StorageID = storageID.StorageID()
@@ -123,19 +124,19 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
require.NoError(t, err, "failed to check blobstore existence")
require.True(t, exRes.Exists, "invalid blobstore existence result")
- // drop from blobstor
+ //drop from blobstor
var bsDeletePrm common.DeletePrm
bsDeletePrm.Address = addr
bsDeletePrm.StorageID = storageID.StorageID()
_, err = sh.blobStor.Delete(context.Background(), bsDeletePrm)
require.NoError(t, err, "failed to delete from blobstore")
- // check existence in blobstore
+ //check existence in blobstore
exRes, err = sh.blobStor.Exists(context.Background(), bsExisted)
require.NoError(t, err, "failed to check blobstore existence")
require.False(t, exRes.Exists, "invalid blobstore existence result")
- // get should return object not found
+ //get should return object not found
_, err = sh.Get(context.Background(), getPrm)
require.Error(t, err, "get returned no error")
require.True(t, client.IsErrObjectNotFound(err), "invalid error type")
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index f512a488a..8b535200d 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -5,15 +5,12 @@ import (
"errors"
"testing"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -34,7 +31,6 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
return util.NewPseudoWorkerPool() // synchronous event processing
})},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -69,12 +65,12 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
require.NoError(t, err)
epoch.Value = 105
- sh.gc.handleEvent(context.Background(), epoch.Value)
+ sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
var getPrm GetPrm
getPrm.SetAddress(objectCore.AddressOf(obj))
_, err = sh.Get(context.Background(), getPrm)
- require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired object must be deleted")
+ require.True(t, client.IsErrObjectNotFound(err), "expired object must be deleted")
}
func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
@@ -131,7 +127,6 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
return util.NewPseudoWorkerPool() // synchronous event processing
})},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
@@ -165,131 +160,8 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
require.True(t, errors.As(err, &splitInfoError), "split info must be provided")
epoch.Value = 105
- sh.gc.handleEvent(context.Background(), epoch.Value)
+ sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
_, err = sh.Get(context.Background(), getPrm)
- require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires")
-}
-
-func TestGCDropsObjectInhumedFromWritecache(t *testing.T) {
- t.Parallel()
-
- t.Run("flush write-cache before inhume", func(t *testing.T) {
- t.Parallel()
- testGCDropsObjectInhumedFromWritecache(t, true)
- })
-
- t.Run("don't flush write-cache before inhume", func(t *testing.T) {
- t.Parallel()
- testGCDropsObjectInhumedFromWritecache(t, false)
- })
-}
-
-func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool) {
- sh := newCustomShard(t, true, shardOptions{
- additionalShardOptions: []Option{WithDisabledGC()},
- wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
- })
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
-
- obj := testutil.GenerateObjectWithSize(1024)
-
- var putPrm PutPrm
- putPrm.SetObject(obj)
- _, err := sh.Put(context.Background(), putPrm)
- require.NoError(t, err)
-
- // writecache stores object
- wcObj, err := sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj))
- require.NoError(t, err)
- require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(wcObj))
-
- // blobstore doesn't store object
- bsRes, err := sh.blobStor.Get(context.Background(), common.GetPrm{
- Address: objectCore.AddressOf(obj),
- })
- require.ErrorAs(t, err, new(*apistatus.ObjectNotFound))
- require.Nil(t, bsRes.Object)
- require.Nil(t, bsRes.RawData)
-
- if flushbeforeInhume {
- sh.writeCache.Flush(context.Background(), false, false)
- }
-
- var inhumePrm InhumePrm
- inhumePrm.MarkAsGarbage(objectCore.AddressOf(obj))
- _, err = sh.Inhume(context.Background(), inhumePrm)
- require.NoError(t, err)
-
- // writecache doesn't store object
- wcObj, err = sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj))
- require.Error(t, err)
- require.Nil(t, wcObj)
-
- if flushbeforeInhume {
- // blobstore store object
- bsRes, err = sh.blobStor.Get(context.Background(), common.GetPrm{
- Address: objectCore.AddressOf(obj),
- })
- require.NoError(t, err)
- require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(bsRes.Object))
- } else {
-
- // blobstore doesn't store object
- bsRes, err = sh.blobStor.Get(context.Background(), common.GetPrm{
- Address: objectCore.AddressOf(obj),
- })
- require.ErrorAs(t, err, new(*apistatus.ObjectNotFound))
- require.Nil(t, bsRes.Object)
- require.Nil(t, bsRes.RawData)
- }
-
- gcRes := sh.removeGarbage(context.Background())
- require.True(t, gcRes.success)
- require.Equal(t, uint64(1), gcRes.deleted)
-}
-
-func TestGCDontDeleteObjectFromWritecache(t *testing.T) {
- sh := newCustomShard(t, true, shardOptions{
- additionalShardOptions: []Option{WithDisabledGC()},
- wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
- })
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
-
- obj := testutil.GenerateObjectWithSize(1024)
-
- var putPrm PutPrm
- putPrm.SetObject(obj)
- _, err := sh.Put(context.Background(), putPrm)
- require.NoError(t, err)
-
- // writecache stores object
- wcObj, err := sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj))
- require.NoError(t, err)
- require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(wcObj))
-
- // blobstore doesn't store object
- bsRes, err := sh.blobStor.Get(context.Background(), common.GetPrm{
- Address: objectCore.AddressOf(obj),
- })
- require.ErrorAs(t, err, new(*apistatus.ObjectNotFound))
- require.Nil(t, bsRes.Object)
- require.Nil(t, bsRes.RawData)
-
- var metaInhumePrm meta.InhumePrm
- metaInhumePrm.SetAddresses(objectCore.AddressOf(obj))
- metaInhumePrm.SetLockObjectHandling()
- metaInhumePrm.SetGCMark()
- _, err = sh.metaBase.Inhume(context.Background(), metaInhumePrm)
- require.NoError(t, err)
-
- // logs: WARN shard/delete.go:98 can't remove object: object must be flushed from writecache
- gcRes := sh.removeGarbage(context.Background())
- require.True(t, gcRes.success)
- require.Equal(t, uint64(0), gcRes.deleted)
-
- // writecache stores object
- wcObj, err = sh.writeCache.Head(context.Background(), objectCore.AddressOf(obj))
- require.NoError(t, err)
- require.Equal(t, objectCore.AddressOf(obj), objectCore.AddressOf(wcObj))
+ require.True(t, client.IsErrObjectNotFound(err), "expired complex object must be deleted on epoch after lock expires")
}
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 28f8912be..2e7c84bcd 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -10,6 +10,7 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -26,9 +27,8 @@ type storFetcher = func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object,
// GetPrm groups the parameters of Get operation.
type GetPrm struct {
- addr oid.Address
- skipMeta bool
- skipEvacCheck bool
+ addr oid.Address
+ skipMeta bool
}
// GetRes groups the resulting values of Get operation.
@@ -50,11 +50,6 @@ func (p *GetPrm) SetIgnoreMeta(ignore bool) {
p.skipMeta = ignore
}
-// SkipEvacCheck is a Get option which instruct to skip check is evacuation in progress.
-func (p *GetPrm) SkipEvacCheck(val bool) {
- p.skipEvacCheck = val
-}
-
// Object returns the requested object.
func (r GetRes) Object() *objectSDK.Object {
return r.obj
@@ -90,10 +85,6 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
return GetRes{}, ErrShardDisabled
}
- if s.info.EvacuationInProgress && !prm.skipEvacCheck {
- return GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
- }
-
cb := func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object, error) {
var getPrm common.GetPrm
getPrm.Address = prm.addr
@@ -111,12 +102,6 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
return c.Get(ctx, prm.addr)
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return GetRes{}, err
- }
- defer release()
-
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
@@ -149,7 +134,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
} else {
- s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
+ s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
}
if s.hasWriteCache() {
@@ -158,14 +143,16 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
return res, false, err
}
if client.IsErrObjectNotFound(err) {
- s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache,
+ s.log.Debug(logs.ShardObjectIsMissingInWritecache,
zap.Stringer("addr", addr),
- zap.Bool("skip_meta", skipMeta))
+ zap.Bool("skip_meta", skipMeta),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
} else {
- s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache,
+ s.log.Error(logs.ShardFailedToFetchObjectFromWritecache,
zap.Error(err),
zap.Stringer("addr", addr),
- zap.Bool("skip_meta", skipMeta))
+ zap.Bool("skip_meta", skipMeta),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}
if skipMeta || mErr != nil {
@@ -178,7 +165,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
mExRes, err := s.metaBase.StorageID(ctx, mPrm)
if err != nil {
- return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err)
+ return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
}
storageID := mExRes.StorageID()
diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go
index 837991b73..19a5e8d70 100644
--- a/pkg/local_object_storage/shard/get_test.go
+++ b/pkg/local_object_storage/shard/get_test.go
@@ -5,9 +5,11 @@ import (
"context"
"errors"
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -30,7 +32,6 @@ func TestShard_Get(t *testing.T) {
func testShardGet(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
var putPrm PutPrm
var getPrm GetPrm
@@ -47,7 +48,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(obj))
- res, err := sh.Get(context.Background(), getPrm)
+ res, err := testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
@@ -65,7 +66,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(obj))
- res, err := sh.Get(context.Background(), getPrm)
+ res, err := testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
@@ -93,13 +94,13 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(child))
- res, err := sh.Get(context.Background(), getPrm)
+ res, err := testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
require.True(t, binaryEqual(child, res.Object()))
getPrm.SetAddress(object.AddressOf(parent))
- _, err = sh.Get(context.Background(), getPrm)
+ _, err = testGet(t, sh, getPrm, hasWriteCache)
var si *objectSDK.SplitInfoError
require.True(t, errors.As(err, &si))
@@ -113,6 +114,19 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
})
}
+func testGet(t *testing.T, sh *Shard, getPrm GetPrm, hasWriteCache bool) (GetRes, error) {
+ res, err := sh.Get(context.Background(), getPrm)
+ if hasWriteCache {
+ require.Eventually(t, func() bool {
+ if client.IsErrObjectNotFound(err) {
+ res, err = sh.Get(context.Background(), getPrm)
+ }
+ return !client.IsErrObjectNotFound(err)
+ }, time.Second, time.Millisecond*100)
+ }
+ return res, err
+}
+
// binary equal is used when object contains empty lists in the structure and
// requre.Equal fails on comparing and []{} lists.
func binaryEqual(a, b *objectSDK.Object) bool {
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index 34b8290d6..a0ec231af 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -4,9 +4,7 @@ import (
"context"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
@@ -15,9 +13,8 @@ import (
// HeadPrm groups the parameters of Head operation.
type HeadPrm struct {
- addr oid.Address
- raw bool
- ShardLooksBad bool
+ addr oid.Address
+ raw bool
}
// HeadRes groups the resulting values of Head operation.
@@ -62,8 +59,7 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
var obj *objectSDK.Object
var err error
- mode := s.GetMode()
- if mode.NoMetabase() || (mode.ReadOnly() && prm.ShardLooksBad) {
+ if s.GetMode().NoMetabase() {
var getPrm GetPrm
getPrm.SetAddress(prm.addr)
getPrm.SetIgnoreMeta(true)
@@ -72,21 +68,10 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
res, err = s.Get(ctx, getPrm)
obj = res.Object()
} else {
- s.m.RLock()
- defer s.m.RUnlock()
- if s.info.EvacuationInProgress {
- return HeadRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
- }
var headParams meta.GetPrm
headParams.SetAddress(prm.addr)
headParams.SetRaw(prm.raw)
- release, limitErr := s.opsLimiter.ReadRequest(ctx)
- if limitErr != nil {
- return HeadRes{}, limitErr
- }
- defer release()
-
var res meta.GetRes
res, err = s.metaBase.Get(ctx, headParams)
obj = res.Header()
diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go
index deb3019df..dfae48e84 100644
--- a/pkg/local_object_storage/shard/head_test.go
+++ b/pkg/local_object_storage/shard/head_test.go
@@ -4,9 +4,11 @@ import (
"context"
"errors"
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
@@ -28,7 +30,6 @@ func TestShard_Head(t *testing.T) {
func testShardHead(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
var putPrm PutPrm
var headPrm HeadPrm
@@ -44,7 +45,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
headPrm.SetAddress(object.AddressOf(obj))
- res, err := sh.Head(context.Background(), headPrm)
+ res, err := testHead(t, sh, headPrm, hasWriteCache)
require.NoError(t, err)
require.Equal(t, obj.CutPayload(), res.Object())
})
@@ -72,7 +73,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
var siErr *objectSDK.SplitInfoError
- _, err = sh.Head(context.Background(), headPrm)
+ _, err = testHead(t, sh, headPrm, hasWriteCache)
require.True(t, errors.As(err, &siErr))
headPrm.SetAddress(object.AddressOf(parent))
@@ -83,3 +84,16 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
require.Equal(t, parent.CutPayload(), head.Object())
})
}
+
+func testHead(t *testing.T, sh *Shard, headPrm HeadPrm, hasWriteCache bool) (HeadRes, error) {
+ res, err := sh.Head(context.Background(), headPrm)
+ if hasWriteCache {
+ require.Eventually(t, func() bool {
+ if client.IsErrObjectNotFound(err) {
+ res, err = sh.Head(context.Background(), headPrm)
+ }
+ return !client.IsErrObjectNotFound(err)
+ }, time.Second, time.Millisecond*100)
+ }
+ return res, err
+}
diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go
index 7391adef2..b0d95e54c 100644
--- a/pkg/local_object_storage/shard/id.go
+++ b/pkg/local_object_storage/shard/id.go
@@ -2,10 +2,8 @@ package shard
import (
"context"
- "errors"
- "fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/mr-tron/base58"
"go.uber.org/zap"
)
@@ -32,25 +30,28 @@ func (s *Shard) ID() *ID {
// UpdateID reads shard ID saved in the metabase and updates it if it is missing.
func (s *Shard) UpdateID(ctx context.Context) (err error) {
- var idFromMetabase []byte
- modeDegraded := s.GetMode().NoMetabase()
- if !modeDegraded {
- if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil {
- err = fmt.Errorf("read shard id from metabase: %w", err)
+ if err = s.metaBase.Open(ctx, false); err != nil {
+ return err
+ }
+ defer func() {
+ cErr := s.metaBase.Close()
+ if err == nil {
+ err = cErr
+ }
+ }()
+ id, err := s.metaBase.ReadShardID()
+ if err != nil {
+ return err
+ }
+ if len(id) != 0 {
+ s.info.ID = NewIDFromBytes(id)
+
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.SetShardID(s.info.ID.String())
}
}
- if len(idFromMetabase) != 0 {
- s.info.ID = NewIDFromBytes(idFromMetabase)
- }
-
- shardID := s.info.ID.String()
- s.metricsWriter.SetShardID(shardID)
- if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
- s.writeCache.GetMetrics().SetShardID(shardID)
- }
-
- s.log = s.log.With(zap.Stringer("shard_id", s.info.ID))
+ s.log = &logger.Logger{Logger: s.log.With(zap.Stringer("shard_id", s.info.ID))}
s.metaBase.SetLogger(s.log)
s.blobStor.SetLogger(s.log)
if s.hasWriteCache() {
@@ -61,12 +62,9 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
if s.pilorama != nil {
s.pilorama.SetParentID(s.info.ID.String())
}
- s.opsLimiter.SetParentID(s.info.ID.String())
- if len(idFromMetabase) == 0 && !modeDegraded {
- if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil {
- err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr))
- }
+ if len(id) != 0 {
+ return nil
}
- return
+ return s.metaBase.WriteShardID(*s.info.ID)
}
diff --git a/pkg/local_object_storage/shard/info.go b/pkg/local_object_storage/shard/info.go
index f01796ec7..a5b9c1a7d 100644
--- a/pkg/local_object_storage/shard/info.go
+++ b/pkg/local_object_storage/shard/info.go
@@ -16,9 +16,6 @@ type Info struct {
// Shard mode.
Mode mode.Mode
- // True when evacuation is in progress.
- EvacuationInProgress bool
-
// Information about the metabase.
MetaBaseInfo meta.Info
@@ -28,6 +25,9 @@ type Info struct {
// Information about the Write Cache.
WriteCacheInfo writecache.Info
+ // Weight parameters of the shard.
+ WeightValues WeightValues
+
// ErrorCount contains amount of errors occurred in shard operations.
ErrorCount uint32
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index c0fd65f4b..a5f8960c3 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
@@ -81,12 +82,6 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return InhumeRes{}, err
- }
- defer release()
-
if s.hasWriteCache() {
for i := range prm.target {
_ = s.writeCache.Delete(ctx, prm.target[i])
@@ -114,8 +109,9 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrLockObjectRemoval
}
- s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
- zap.Error(err),
+ s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase,
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
s.m.RUnlock()
@@ -125,9 +121,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
s.m.RUnlock()
- s.decObjectCounterBy(logical, res.LogicInhumed())
- s.decObjectCounterBy(user, res.UserInhumed())
- s.decContainerObjectCounter(res.InhumedByCnrID())
+ s.decObjectCounterBy(logical, res.AvailableInhumed())
i := 0
for i < res.GetDeletionInfoLength() {
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index 1421f0e18..6c8e46faf 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -27,7 +27,6 @@ func TestShard_Inhume(t *testing.T) {
func testShardInhume(t *testing.T, hasWriteCache bool) {
sh := newShard(t, hasWriteCache)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -48,7 +47,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- _, err = sh.Get(context.Background(), getPrm)
+ _, err = testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
_, err = sh.Inhume(context.Background(), inhPrm)
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index af87981ca..d6e4d7e50 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -33,30 +34,6 @@ func (r ListContainersRes) Containers() []cid.ID {
return r.containers
}
-// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
-type IterateOverContainersPrm struct {
- // Handler function executed upon containers in db.
- Handler func(context.Context, objectSDK.Type, cid.ID) error
-}
-
-// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
-type IterateOverObjectsInContainerPrm struct {
- // ObjectType type of objects to iterate over.
- ObjectType objectSDK.Type
- // ContainerID container for objects to iterate over.
- ContainerID cid.ID
- // Handler function executed upon objects in db.
- Handler func(context.Context, *objectcore.Info) error
-}
-
-// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation.
-type CountAliveObjectsInContainerPrm struct {
- // ObjectType type of objects to iterate over.
- ObjectType objectSDK.Type
- // ContainerID container for objects to iterate over.
- ContainerID cid.ID
-}
-
// ListWithCursorPrm contains parameters for ListWithCursor operation.
type ListWithCursorPrm struct {
count uint32
@@ -65,7 +42,7 @@ type ListWithCursorPrm struct {
// ListWithCursorRes contains values returned from ListWithCursor operation.
type ListWithCursorRes struct {
- addrList []objectcore.Info
+ addrList []objectcore.AddressWithType
cursor *Cursor
}
@@ -82,7 +59,7 @@ func (p *ListWithCursorPrm) WithCursor(cursor *Cursor) {
}
// AddressList returns addresses selected by ListWithCursor operation.
-func (r ListWithCursorRes) AddressList() []objectcore.Info {
+func (r ListWithCursorRes) AddressList() []objectcore.AddressWithType {
return r.addrList
}
@@ -106,15 +83,9 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
return SelectRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return SelectRes{}, err
- }
- defer release()
-
lst, err := s.metaBase.Containers(ctx)
if err != nil {
- return res, fmt.Errorf("list stored containers: %w", err)
+ return res, fmt.Errorf("can't list stored containers: %w", err)
}
filters := objectSDK.NewSearchFilters()
@@ -127,9 +98,10 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase
if err != nil {
- s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
+ s.log.Debug(logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),
- zap.Error(err))
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
continue
}
@@ -151,15 +123,9 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo
return ListContainersRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ListContainersRes{}, err
- }
- defer release()
-
containers, err := s.metaBase.Containers(ctx)
if err != nil {
- return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err)
+ return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err)
}
return ListContainersRes{
@@ -185,18 +151,12 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
return ListWithCursorRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return ListWithCursorRes{}, err
- }
- defer release()
-
var metaPrm meta.ListPrm
metaPrm.SetCount(prm.count)
metaPrm.SetCursor(prm.cursor)
res, err := s.metaBase.ListWithCursor(ctx, metaPrm)
if err != nil {
- return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err)
+ return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err)
}
return ListWithCursorRes{
@@ -204,96 +164,3 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
cursor: res.Cursor(),
}, nil
}
-
-// IterateOverContainers lists physical containers presented in shard.
-func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
- _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverContainers",
- trace.WithAttributes(
- attribute.Bool("has_handler", prm.Handler != nil),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- var metaPrm meta.IterateOverContainersPrm
- metaPrm.Handler = prm.Handler
- err = s.metaBase.IterateOverContainers(ctx, metaPrm)
- if err != nil {
- return fmt.Errorf("iterate over containers: %w", err)
- }
-
- return nil
-}
-
-// IterateOverObjectsInContainer lists physical objects presented in shard for provided container's bucket name.
-func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
- _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverObjectsInContainer",
- trace.WithAttributes(
- attribute.Bool("has_handler", prm.Handler != nil),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- var metaPrm meta.IterateOverObjectsInContainerPrm
- metaPrm.ContainerID = prm.ContainerID
- metaPrm.ObjectType = prm.ObjectType
- metaPrm.Handler = prm.Handler
- err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
- if err != nil {
- return fmt.Errorf("iterate over objects: %w", err)
- }
-
- return nil
-}
-
-// CountAliveObjectsInContainer count objects in bucket which aren't in graveyard or garbage.
-func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
- _, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInBucket")
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return 0, ErrDegradedMode
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
-
- var metaPrm meta.CountAliveObjectsInContainerPrm
- metaPrm.ObjectType = prm.ObjectType
- metaPrm.ContainerID = prm.ContainerID
- count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
- if err != nil {
- return 0, fmt.Errorf("count alive objects in bucket: %w", err)
- }
-
- return count, nil
-}
diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go
index 139b2e316..9ca1753c4 100644
--- a/pkg/local_object_storage/shard/list_test.go
+++ b/pkg/local_object_storage/shard/list_test.go
@@ -18,14 +18,12 @@ func TestShard_List(t *testing.T) {
t.Run("without write cache", func(t *testing.T) {
t.Parallel()
sh := newShard(t, false)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
testShardList(t, sh)
})
t.Run("with write cache", func(t *testing.T) {
t.Parallel()
shWC := newShard(t, true)
- defer func() { require.NoError(t, shWC.Close(context.Background())) }()
testShardList(t, shWC)
})
}
@@ -39,11 +37,11 @@ func testShardList(t *testing.T, sh *Shard) {
var errG errgroup.Group
errG.SetLimit(C * N)
- for range C {
+ for i := 0; i < C; i++ {
errG.Go(func() error {
cnr := cidtest.ID()
- for range N {
+ for j := 0; j < N; j++ {
errG.Go(func() error {
obj := testutil.GenerateObjectWithCID(cnr)
testutil.AddPayload(obj, 1<<2)
diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go
index 9c392fdac..52186cbfd 100644
--- a/pkg/local_object_storage/shard/lock.go
+++ b/pkg/local_object_storage/shard/lock.go
@@ -38,13 +38,7 @@ func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- err = s.metaBase.Lock(ctx, idCnr, locker, locked)
+ err := s.metaBase.Lock(ctx, idCnr, locker, locked)
if err != nil {
return fmt.Errorf("metabase lock: %w", err)
}
@@ -67,12 +61,6 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
return false, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return false, err
- }
- defer release()
-
var prm meta.IsLockedPrm
prm.SetAddress(addr)
@@ -83,27 +71,3 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
return res.Locked(), nil
}
-
-// GetLocks return lock id's of the provided object. Not found object is
-// considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise.
-func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocks",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.String("address", addr.EncodeToString()),
- ))
- defer span.End()
-
- m := s.GetMode()
- if m.NoMetabase() {
- return nil, ErrDegradedMode
- }
-
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
-
- return s.metaBase.GetLocks(ctx, addr)
-}
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index 3878a65cd..ca6b0ca38 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -28,15 +28,13 @@ func TestShard_Lock(t *testing.T) {
var sh *Shard
rootPath := t.TempDir()
- l := logger.NewLoggerWrapper(zap.NewNop())
opts := []Option{
WithID(NewIDFromBytes([]byte{})),
- WithLogger(l),
+ WithLogger(&logger.Logger{Logger: zap.NewNop()}),
WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
- context.Background(),
blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(2),
blobovniczatree.WithBlobovniczaShallowWidth(2)),
@@ -54,8 +52,8 @@ func TestShard_Lock(t *testing.T) {
meta.WithPath(filepath.Join(rootPath, "meta")),
meta.WithEpochState(epochState{}),
),
- WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(ctx, addresses)
+ WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(addresses)
}),
}
@@ -63,7 +61,9 @@ func TestShard_Lock(t *testing.T) {
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
+ t.Cleanup(func() {
+ releaseShard(sh, t)
+ })
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
@@ -149,7 +149,6 @@ func TestShard_Lock(t *testing.T) {
func TestShard_IsLocked(t *testing.T) {
sh := newShard(t, false)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
deleted file mode 100644
index 087ba42ef..000000000
--- a/pkg/local_object_storage/shard/metrics.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package shard
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-
-// MetricsWriter is an interface that must store shard's metrics.
-type MetricsWriter interface {
- // SetObjectCounter must set object counter taking into account object type.
- SetObjectCounter(objectType string, v uint64)
- // AddToObjectCounter must update object counter taking into account object
- // type.
- // Negative parameter must decrease the counter.
- AddToObjectCounter(objectType string, delta int)
- // AddToContainerSize must add a value to the container size.
- // Value can be negative.
- AddToContainerSize(cnr string, value int64)
- // AddToPayloadSize must add a value to the payload size.
- // Value can be negative.
- AddToPayloadSize(value int64)
- // IncObjectCounter must increment shard's object counter taking into account
- // object type.
- IncObjectCounter(objectType string)
- // SetShardID must set (update) the shard identifier that will be used in
- // metrics.
- SetShardID(id string)
- // SetMode set mode of shard.
- SetMode(mode mode.Mode)
- // SetContainerObjectsCount sets container object count.
- SetContainerObjectsCount(cnrID string, objectType string, value uint64)
- // IncContainerObjectsCount increments container object count.
- IncContainerObjectsCount(cnrID string, objectType string)
- // SubContainerObjectsCount subtracts container object count.
- SubContainerObjectsCount(cnrID string, objectType string, value uint64)
- // IncRefillObjectsCount increments refill objects count.
- IncRefillObjectsCount(path string, size int, success bool)
- // SetRefillPercent sets refill percent.
- SetRefillPercent(path string, percent uint32)
- // SetRefillStatus sets refill status.
- SetRefillStatus(path string, status string)
- // SetEvacuationInProgress sets evacuation status
- SetEvacuationInProgress(value bool)
-}
-
-type noopMetrics struct{}
-
-var _ MetricsWriter = noopMetrics{}
-
-func (noopMetrics) SetObjectCounter(string, uint64) {}
-func (noopMetrics) AddToObjectCounter(string, int) {}
-func (noopMetrics) AddToContainerSize(string, int64) {}
-func (noopMetrics) AddToPayloadSize(int64) {}
-func (noopMetrics) IncObjectCounter(string) {}
-func (noopMetrics) SetShardID(string) {}
-func (noopMetrics) SetMode(mode.Mode) {}
-func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {}
-func (noopMetrics) IncContainerObjectsCount(string, string) {}
-func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {}
-func (noopMetrics) IncRefillObjectsCount(string, int, bool) {}
-func (noopMetrics) SetRefillPercent(string, uint32) {}
-func (noopMetrics) SetRefillStatus(string, string) {}
-func (noopMetrics) SetEvacuationInProgress(bool) {}
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 5230dcad0..23721af6e 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -5,7 +5,6 @@ import (
"path/filepath"
"sync"
"testing"
- "time"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -14,36 +13,18 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
type metricsStore struct {
- mtx sync.Mutex
- objCounters map[string]uint64
- cnrSize map[string]int64
- cnrCount map[string]uint64
- pldSize int64
- mode mode.Mode
- errCounter int64
- refillCount int64
- refillSize int64
- refillPercent uint32
- refillStatus string
-}
-
-func newMetricStore() *metricsStore {
- return &metricsStore{
- objCounters: map[string]uint64{
- "phy": 0,
- "logic": 0,
- },
- cnrSize: make(map[string]int64),
- cnrCount: make(map[string]uint64),
- }
+ mtx sync.Mutex
+ objCounters map[string]uint64
+ cnrSize map[string]int64
+ pldSize int64
+ mode mode.Mode
+ errCounter int64
}
func (m *metricsStore) SetShardID(_ string) {}
@@ -102,6 +83,12 @@ func (m *metricsStore) IncObjectCounter(objectType string) {
m.objCounters[objectType] += 1
}
+func (m *metricsStore) DecObjectCounter(objectType string) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ m.AddToObjectCounter(objectType, -1)
+}
+
func (m *metricsStore) SetMode(mode mode.Mode) {
m.mtx.Lock()
defer m.mtx.Unlock()
@@ -138,79 +125,20 @@ func (m *metricsStore) DeleteShardMetrics() {
m.errCounter = 0
}
-func (m *metricsStore) SetContainerObjectsCount(cnrID string, objectType string, value uint64) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.cnrCount[cnrID+objectType] = value
-}
-
-func (m *metricsStore) IncContainerObjectsCount(cnrID string, objectType string) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.cnrCount[cnrID+objectType]++
-}
-
-func (m *metricsStore) SubContainerObjectsCount(cnrID string, objectType string, value uint64) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- existed := m.cnrCount[cnrID+objectType]
- if existed < value {
- panic("existed value smaller than value to sustract")
- }
- if existed == value {
- delete(m.cnrCount, cnrID+objectType)
- } else {
- m.cnrCount[cnrID+objectType] -= value
- }
-}
-
-func (m *metricsStore) getContainerCount(cnrID, objectType string) (uint64, bool) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
- v, ok := m.cnrCount[cnrID+objectType]
- return v, ok
-}
-
-func (m *metricsStore) IncRefillObjectsCount(_ string, size int, success bool) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- m.refillCount++
- m.refillSize += int64(size)
-}
-
-func (m *metricsStore) SetRefillPercent(_ string, percent uint32) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- m.refillPercent = percent
-}
-
-func (m *metricsStore) SetRefillStatus(_ string, status string) {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- m.refillStatus = status
-}
-
-func (m *metricsStore) SetEvacuationInProgress(bool) {
-}
-
func TestCounters(t *testing.T) {
t.Parallel()
dir := t.TempDir()
sh, mm := shardWithMetrics(t, dir)
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
- sh.SetMode(context.Background(), mode.ReadOnly)
+ sh.SetMode(mode.ReadOnly)
require.Equal(t, mode.ReadOnly, mm.mode)
- sh.SetMode(context.Background(), mode.ReadWrite)
+ sh.SetMode(mode.ReadWrite)
require.Equal(t, mode.ReadWrite, mm.mode)
const objNumber = 10
oo := make([]*objectSDK.Object, objNumber)
- for i := range objNumber {
+ for i := 0; i < objNumber; i++ {
oo[i] = testutil.GenerateObject()
}
@@ -219,40 +147,21 @@ func TestCounters(t *testing.T) {
require.Zero(t, mm.getObjectCounter(logical))
require.Empty(t, mm.containerSizes())
require.Zero(t, mm.payloadSize())
-
- for _, obj := range oo {
- contID, _ := obj.ContainerID()
- v, ok := mm.getContainerCount(contID.EncodeToString(), physical)
- require.Zero(t, v)
- require.False(t, ok)
- v, ok = mm.getContainerCount(contID.EncodeToString(), logical)
- require.Zero(t, v)
- require.False(t, ok)
- v, ok = mm.getContainerCount(contID.EncodeToString(), user)
- require.Zero(t, v)
- require.False(t, ok)
- }
})
var totalPayload int64
- expectedLogicalSizes := make(map[string]int64)
- expected := make(map[cid.ID]meta.ObjectCounters)
+ expectedSizes := make(map[string]int64)
for i := range oo {
cnr, _ := oo[i].ContainerID()
oSize := int64(oo[i].PayloadSize())
- expectedLogicalSizes[cnr.EncodeToString()] += oSize
+ expectedSizes[cnr.EncodeToString()] += oSize
totalPayload += oSize
- expected[cnr] = meta.ObjectCounters{
- Logic: 1,
- Phy: 1,
- User: 1,
- }
}
var prm PutPrm
- for i := range objNumber {
+ for i := 0; i < objNumber; i++ {
prm.SetObject(oo[i])
_, err := sh.Put(context.Background(), prm)
@@ -261,94 +170,46 @@ func TestCounters(t *testing.T) {
require.Equal(t, uint64(objNumber), mm.getObjectCounter(physical))
require.Equal(t, uint64(objNumber), mm.getObjectCounter(logical))
- require.Equal(t, uint64(objNumber), mm.getObjectCounter(user))
- require.Equal(t, expectedLogicalSizes, mm.containerSizes())
+ require.Equal(t, expectedSizes, mm.containerSizes())
require.Equal(t, totalPayload, mm.payloadSize())
- cc, err := sh.metaBase.ContainerCounters(context.Background())
- require.NoError(t, err)
- require.Equal(t, meta.ContainerCounters{Counts: expected}, cc)
-
t.Run("inhume_GC", func(t *testing.T) {
var prm InhumePrm
inhumedNumber := objNumber / 4
- for i := range inhumedNumber {
+ for i := 0; i < inhumedNumber; i++ {
prm.MarkAsGarbage(objectcore.AddressOf(oo[i]))
_, err := sh.Inhume(context.Background(), prm)
require.NoError(t, err)
-
- cid, ok := oo[i].ContainerID()
- require.True(t, ok)
- expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize())
-
- if v, ok := expected[cid]; ok {
- v.Logic--
- v.User--
- if v.IsZero() {
- delete(expected, cid)
- } else {
- expected[cid] = v
- }
- }
}
require.Equal(t, uint64(objNumber), mm.getObjectCounter(physical))
require.Equal(t, uint64(objNumber-inhumedNumber), mm.getObjectCounter(logical))
- require.Equal(t, uint64(objNumber-inhumedNumber), mm.getObjectCounter(user))
- require.Equal(t, expectedLogicalSizes, mm.containerSizes())
+ require.Equal(t, expectedSizes, mm.containerSizes())
require.Equal(t, totalPayload, mm.payloadSize())
- cc, err := sh.metaBase.ContainerCounters(context.Background())
- require.NoError(t, err)
- require.Equal(t, meta.ContainerCounters{Counts: expected}, cc)
-
oo = oo[inhumedNumber:]
})
t.Run("inhume_TS", func(t *testing.T) {
var prm InhumePrm
+ ts := objectcore.AddressOf(testutil.GenerateObject())
phy := mm.getObjectCounter(physical)
logic := mm.getObjectCounter(logical)
- custom := mm.getObjectCounter(user)
inhumedNumber := int(phy / 4)
- for _, o := range addrFromObjs(oo[:inhumedNumber]) {
- ts := oidtest.Address()
- ts.SetContainer(o.Container())
- prm.SetTarget(ts, o)
- _, err := sh.Inhume(context.Background(), prm)
- require.NoError(t, err)
- }
+ prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...)
- for i := range inhumedNumber {
- cid, ok := oo[i].ContainerID()
- require.True(t, ok)
- expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize())
-
- if v, ok := expected[cid]; ok {
- v.Logic--
- v.User--
- if v.IsZero() {
- delete(expected, cid)
- } else {
- expected[cid] = v
- }
- }
- }
+ _, err := sh.Inhume(context.Background(), prm)
+ require.NoError(t, err)
require.Equal(t, phy, mm.getObjectCounter(physical))
require.Equal(t, logic-uint64(inhumedNumber), mm.getObjectCounter(logical))
- require.Equal(t, custom-uint64(inhumedNumber), mm.getObjectCounter(user))
- require.Equal(t, expectedLogicalSizes, mm.containerSizes())
+ require.Equal(t, expectedSizes, mm.containerSizes())
require.Equal(t, totalPayload, mm.payloadSize())
- cc, err = sh.metaBase.ContainerCounters(context.Background())
- require.NoError(t, err)
- require.Equal(t, meta.ContainerCounters{Counts: expected}, cc)
-
oo = oo[inhumedNumber:]
})
@@ -357,7 +218,6 @@ func TestCounters(t *testing.T) {
phy := mm.getObjectCounter(physical)
logic := mm.getObjectCounter(logical)
- custom := mm.getObjectCounter(user)
deletedNumber := int(phy / 4)
prm.SetAddresses(addrFromObjs(oo[:deletedNumber])...)
@@ -367,28 +227,16 @@ func TestCounters(t *testing.T) {
require.Equal(t, phy-uint64(deletedNumber), mm.getObjectCounter(physical))
require.Equal(t, logic-uint64(deletedNumber), mm.getObjectCounter(logical))
- require.Equal(t, custom-uint64(deletedNumber), mm.getObjectCounter(user))
var totalRemovedpayload uint64
for i := range oo[:deletedNumber] {
removedPayload := oo[i].PayloadSize()
totalRemovedpayload += removedPayload
cnr, _ := oo[i].ContainerID()
- expectedLogicalSizes[cnr.EncodeToString()] -= int64(removedPayload)
-
- if v, ok := expected[cnr]; ok {
- v.Logic--
- v.Phy--
- v.User--
- expected[cnr] = v
- }
+ expectedSizes[cnr.EncodeToString()] -= int64(removedPayload)
}
- require.Equal(t, expectedLogicalSizes, mm.containerSizes())
+ require.Equal(t, expectedSizes, mm.containerSizes())
require.Equal(t, totalPayload-int64(totalRemovedpayload), mm.payloadSize())
-
- cc, err = sh.metaBase.ContainerCounters(context.Background())
- require.NoError(t, err)
- require.Equal(t, meta.ContainerCounters{Counts: expected}, cc)
})
}
@@ -404,7 +252,13 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
}),
}
- mm := newMetricStore()
+ mm := &metricsStore{
+ objCounters: map[string]uint64{
+ "phy": 0,
+ "logic": 0,
+ },
+ cnrSize: make(map[string]int64),
+ }
sh := New(
WithID(NewIDFromBytes([]byte{})),
@@ -414,18 +268,21 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
meta.WithPath(filepath.Join(path, "meta")),
meta.WithEpochState(epochState{})),
WithMetricsWriter(mm),
- WithGCRemoverSleepInterval(time.Hour),
)
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
+ t.Cleanup(func() {
+ sh.Close()
+ })
+
return sh, mm
}
func addrFromObjs(oo []*objectSDK.Object) []oid.Address {
aa := make([]oid.Address, len(oo))
- for i := range len(oo) {
+ for i := 0; i < len(oo); i++ {
aa[i] = objectcore.AddressOf(oo[i])
}
diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go
index 901528976..1bab57448 100644
--- a/pkg/local_object_storage/shard/mode.go
+++ b/pkg/local_object_storage/shard/mode.go
@@ -1,8 +1,6 @@
package shard
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -20,21 +18,19 @@ var ErrDegradedMode = logicerr.New("shard is in degraded mode")
//
// Returns any error encountered that did not allow
// setting shard mode.
-func (s *Shard) SetMode(ctx context.Context, m mode.Mode) error {
+func (s *Shard) SetMode(m mode.Mode) error {
unlock := s.lockExclusive()
defer unlock()
- return s.setMode(ctx, m)
+ return s.setMode(m)
}
-func (s *Shard) setMode(ctx context.Context, m mode.Mode) error {
- s.log.Info(ctx, logs.ShardSettingShardMode,
+func (s *Shard) setMode(m mode.Mode) error {
+ s.log.Info(logs.ShardSettingShardMode,
zap.Stringer("old_mode", s.info.Mode),
zap.Stringer("new_mode", m))
- components := []interface {
- SetMode(context.Context, mode.Mode) error
- }{
+ components := []interface{ SetMode(mode.Mode) error }{
s.metaBase, s.blobStor,
}
@@ -62,16 +58,18 @@ func (s *Shard) setMode(ctx context.Context, m mode.Mode) error {
if !m.Disabled() {
for i := range components {
- if err := components[i].SetMode(ctx, m); err != nil {
+ if err := components[i].SetMode(m); err != nil {
return err
}
}
}
s.info.Mode = m
- s.metricsWriter.SetMode(s.info.Mode)
+ if s.metricsWriter != nil {
+ s.metricsWriter.SetMode(s.info.Mode)
+ }
- s.log.Info(ctx, logs.ShardShardModeSetSuccessfully,
+ s.log.Info(logs.ShardShardModeSetSuccessfully,
zap.Stringer("mode", s.info.Mode))
return nil
}
diff --git a/pkg/local_object_storage/shard/mode/mode.go b/pkg/local_object_storage/shard/mode/mode.go
index dc4d52b0e..49c888d63 100644
--- a/pkg/local_object_storage/shard/mode/mode.go
+++ b/pkg/local_object_storage/shard/mode/mode.go
@@ -8,41 +8,27 @@ type Mode uint32
const (
// ReadWrite is a Mode value for shard that is available
// for read and write operations. Default shard mode.
- ReadWrite Mode = 0b000
-
- // ReadOnly is a Mode value for shard that does not
- // accept write operation but is readable.
- ReadOnly Mode = 0b001
-
- // Degraded is a Mode value for shard when the metabase is unavailable.
- // It is hard to perform some modifying operations in this mode, thus it can only be set by an administrator.
- Degraded Mode = 0b010
-
- // Disabled mode is a mode where a shard is disabled.
- // An existing shard can't have this mode, but it can be used in
- // the configuration or control service commands.
- Disabled Mode = math.MaxUint32
+ ReadWrite Mode = 0
// DegradedReadOnly is a Mode value for shard that is set automatically
// after a certain number of errors is encountered. It is the same as
// `mode.Degraded` but also is read-only.
- DegradedReadOnly Mode = Degraded | ReadOnly
+ DegradedReadOnly = Degraded | ReadOnly
+
+ // Disabled mode is a mode where a shard is disabled.
+ // An existing shard can't have this mode, but it can be used in
+ // the configuration or control service commands.
+ Disabled = math.MaxUint32
)
-// ComponentMode represents basic operation modes for shared components, including READ, READ_WRITE, and DISABLED.
-type ComponentMode uint32
-
const (
- // ComponentReadWrite is a Mode value for component that is available
- // for read and write operations. Default component mode.
- ComponentReadWrite ComponentMode = 0
-
- // ComponentReadOnly is a Mode value for component that does not
+ // ReadOnly is a Mode value for shard that does not
// accept write operation but is readable.
- ComponentReadOnly ComponentMode = 0b001
+ ReadOnly Mode = 1 << iota
- // ComponentDisabled mode is a mode where a component is disabled.
- ComponentDisabled ComponentMode = math.MaxUint32
+ // Degraded is a Mode value for shard when the metabase is unavailable.
+ // It is hard to perform some modifying operations in this mode, thus it can only be set by an administrator.
+ Degraded
)
func (m Mode) String() string {
@@ -62,19 +48,6 @@ func (m Mode) String() string {
}
}
-func (m ComponentMode) String() string {
- switch m {
- default:
- return "UNDEFINED"
- case ComponentReadWrite:
- return "READ_WRITE"
- case ComponentReadOnly:
- return "READ_ONLY"
- case ComponentDisabled:
- return "CLOSED"
- }
-}
-
// NoMetabase returns true iff m is operating without the metabase.
func (m Mode) NoMetabase() bool {
return m&Degraded != 0
@@ -85,39 +58,6 @@ func (m Mode) ReadOnly() bool {
return m&ReadOnly != 0
}
-// ReadOnly returns true iff m prohibits modifying operations with shard.
-func (m ComponentMode) ReadOnly() bool {
- return m&ComponentReadOnly != 0
-}
-
func (m Mode) Disabled() bool {
return m == Disabled
}
-
-func (m ComponentMode) Disabled() bool {
- return m == ComponentDisabled
-}
-
-// ConvertToComponentModeDegraded converts a ShardMode to a corresponding ComponentMode.
-// Disables the component if the node is in degraded mode. Used in Metabase, Writecache, Pilorama.
-func ConvertToComponentModeDegraded(m Mode) ComponentMode {
- if m.NoMetabase() || m.Disabled() {
- return ComponentDisabled
- }
- if m.ReadOnly() {
- return ComponentReadOnly
- }
- return ComponentReadWrite
-}
-
-// ConvertToComponentMode converts a ShardMode to a corresponding ComponentMode.
-// Ignores the degraded mode of the node. Used in Blobstore.
-func ConvertToComponentMode(m Mode) ComponentMode {
- if m.Disabled() {
- return ComponentDisabled
- }
- if m.ReadOnly() {
- return ComponentReadOnly
- }
- return ComponentReadWrite
-}
diff --git a/pkg/local_object_storage/shard/move.go b/pkg/local_object_storage/shard/move.go
new file mode 100644
index 000000000..539dbc97f
--- /dev/null
+++ b/pkg/local_object_storage/shard/move.go
@@ -0,0 +1,62 @@
+package shard
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+)
+
+// ToMoveItPrm encapsulates parameters for ToMoveIt operation.
+type ToMoveItPrm struct {
+ addr oid.Address
+}
+
+// ToMoveItRes encapsulates results of ToMoveIt operation.
+type ToMoveItRes struct{}
+
+// SetAddress sets object address that should be marked to move into another
+// shard.
+func (p *ToMoveItPrm) SetAddress(addr oid.Address) {
+ p.addr = addr
+}
+
+// ToMoveIt calls metabase.ToMoveIt method to mark object as relocatable to
+// another shard.
+func (s *Shard) ToMoveIt(ctx context.Context, prm ToMoveItPrm) (ToMoveItRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ToMoveIt",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ m := s.info.Mode
+ if m.ReadOnly() {
+ return ToMoveItRes{}, ErrReadOnlyMode
+ } else if m.NoMetabase() {
+ return ToMoveItRes{}, ErrDegradedMode
+ }
+
+ var toMovePrm meta.ToMoveItPrm
+ toMovePrm.SetAddress(prm.addr)
+
+ _, err := s.metaBase.ToMoveIt(ctx, toMovePrm)
+ if err != nil {
+ s.log.Debug(logs.ShardCouldNotMarkObjectForShardRelocationInMetabase,
+ zap.String("error", err.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
+ )
+ }
+
+ return ToMoveItRes{}, nil
+}
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index f8cb00a31..688b7aae7 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -17,8 +17,7 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- obj *objectSDK.Object
- indexAttributes bool
+ obj *objectSDK.Object
}
// PutRes groups the resulting values of Put operation.
@@ -29,10 +28,6 @@ func (p *PutPrm) SetObject(obj *objectSDK.Object) {
p.obj = obj
}
-func (p *PutPrm) SetIndexAttributes(v bool) {
- p.indexAttributes = v
-}
-
// Put saves the object in shard.
//
// Returns any error encountered that
@@ -67,12 +62,6 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
var res common.PutRes
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return PutRes{}, err
- }
- defer release()
-
// exist check are not performed there, these checks should be executed
// ahead of `Put` by storage engine
tryCache := s.hasWriteCache() && !m.NoMetabase()
@@ -81,13 +70,13 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
}
if err != nil || !tryCache {
if err != nil {
- s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
- zap.Error(err))
+ s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
+ zap.String("err", err.Error()))
}
res, err = s.blobStor.Put(ctx, putPrm)
if err != nil {
- return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err)
+ return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
}
}
@@ -95,19 +84,15 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
var pPrm meta.PutPrm
pPrm.SetObject(prm.obj)
pPrm.SetStorageID(res.StorageID)
- pPrm.SetIndexAttributes(prm.indexAttributes)
- res, err := s.metaBase.Put(ctx, pPrm)
- if err != nil {
+ if _, err := s.metaBase.Put(ctx, pPrm); err != nil {
// may we need to handle this case in a special way
// since the object has been successfully written to BlobStor
- return PutRes{}, fmt.Errorf("put object to metabase: %w", err)
+ return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)
}
- if res.Inserted {
- s.incObjectCounter(putPrm.Address.Container(), meta.IsUserObject(prm.obj))
- s.addToPayloadSize(int64(prm.obj.PayloadSize()))
- s.addToContainerSize(putPrm.Address.Container().EncodeToString(), int64(prm.obj.PayloadSize()))
- }
+ s.incObjectCounter()
+ s.addToPayloadSize(int64(prm.obj.PayloadSize()))
+ s.addToContainerSize(putPrm.Address.Container().EncodeToString(), int64(prm.obj.PayloadSize()))
}
return PutRes{}, nil
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index 443689104..9491543c4 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -87,10 +87,6 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
s.m.RLock()
defer s.m.RUnlock()
- if s.info.EvacuationInProgress {
- return RngRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
- }
-
if s.info.Mode.Disabled() {
return RngRes{}, ErrShardDisabled
}
@@ -131,12 +127,6 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
return obj, nil
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return RngRes{}, err
- }
- defer release()
-
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go
index 06fe9f511..a8bc83307 100644
--- a/pkg/local_object_storage/shard/range_test.go
+++ b/pkg/local_object_storage/shard/range_test.go
@@ -1,7 +1,6 @@
package shard
import (
- "bytes"
"context"
"math"
"path/filepath"
@@ -12,10 +11,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/nspcc-dev/neo-go/pkg/util/slice"
"github.com/stretchr/testify/require"
)
@@ -68,8 +69,11 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
testCase{true, "object in write-cache, out of range, big offset", 100, newRange(101, math.MaxUint64-10)})
}
- wcOpts := []writecache.Option{
- writecache.WithMaxObjectSize(writeCacheMaxSize),
+ wcOpts := writecacheconfig.Options{
+ Type: writecacheconfig.TypeBBolt,
+ BBoltOptions: []writecachebbolt.Option{
+ writecachebbolt.WithMaxObjectSize(writeCacheMaxSize),
+ },
}
sh := newCustomShard(t, hasWriteCache, shardOptions{
@@ -78,9 +82,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
blobstor.WithStorages([]blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
- context.Background(),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithLogger(test.NewLogger(t, true)),
blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -95,7 +97,6 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
}),
},
})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
@@ -104,7 +105,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
testutil.AddPayload(obj, tc.payloadSize)
addr := object.AddressOf(obj)
- payload := bytes.Clone(obj.Payload())
+ payload := slice.Copy(obj.Payload())
var putPrm PutPrm
putPrm.SetObject(obj)
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
deleted file mode 100644
index 20f1f2b6f..000000000
--- a/pkg/local_object_storage/shard/rebuild.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package shard
-
-import (
- "context"
- "errors"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
-)
-
-var ErrRebuildInProgress = errors.New("shard rebuild in progress")
-
-type rebuildTask struct {
- concurrencyLimiter common.RebuildLimiter
- fillPercent int
-}
-
-type rebuilder struct {
- mtx *sync.Mutex
- wg *sync.WaitGroup
- cancel func()
- done chan struct{}
- tasks chan rebuildTask
-}
-
-func newRebuilder() *rebuilder {
- return &rebuilder{
- mtx: &sync.Mutex{},
- wg: &sync.WaitGroup{},
- tasks: make(chan rebuildTask),
- }
-}
-
-func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) {
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- if r.done != nil {
- return // already started
- }
- ctx, cancel := context.WithCancel(ctx)
- r.cancel = cancel
- r.done = make(chan struct{})
- r.wg.Add(1)
- go func() {
- defer r.wg.Done()
- for {
- select {
- case <-r.done:
- return
- case t, ok := <-r.tasks:
- if !ok {
- continue
- }
- runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter)
- }
- }
- }()
-}
-
-func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger,
- fillPercent int, concLimiter common.RebuildLimiter,
-) {
- select {
- case <-ctx.Done():
- return
- default:
- }
- log.Info(ctx, logs.BlobstoreRebuildStarted)
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
- if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil {
- log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err))
- } else {
- log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully)
- }
-}
-
-func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int,
-) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- case r.tasks <- rebuildTask{
- concurrencyLimiter: limiter,
- fillPercent: fillPercent,
- }:
- return nil
- default:
- return ErrRebuildInProgress
- }
-}
-
-func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) {
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- if r.done != nil {
- close(r.done)
- }
- if r.cancel != nil {
- r.cancel()
- }
- r.wg.Wait()
- r.cancel = nil
- r.done = nil
- log.Info(ctx, logs.BlobstoreRebuildStopped)
-}
-
-var errMBIsNotAvailable = errors.New("metabase is not available")
-
-type mbStorageIDUpdate struct {
- mb *meta.DB
-}
-
-func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- if u.mb == nil {
- return errMBIsNotAvailable
- }
-
- var prm meta.UpdateStorageIDPrm
- prm.SetAddress(addr)
- prm.SetStorageID(storageID)
- _, err := u.mb.UpdateStorageID(ctx, prm)
- return err
-}
-
-type RebuildPrm struct {
- ConcurrencyLimiter common.ConcurrencyLimiter
- TargetFillPercent uint32
-}
-
-func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ScheduleRebuild",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.Int64("target_fill_percent", int64(p.TargetFillPercent)),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.ReadOnly() {
- return ErrReadOnlyMode
- }
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
-
- limiter := &rebuildLimiter{
- concurrencyLimiter: p.ConcurrencyLimiter,
- rateLimiter: s.opsLimiter,
- }
- return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent))
-}
-
-var _ common.RebuildLimiter = (*rebuildLimiter)(nil)
-
-type rebuildLimiter struct {
- concurrencyLimiter common.ConcurrencyLimiter
- rateLimiter qos.Limiter
-}
-
-func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) {
- return r.concurrencyLimiter.AcquireWorkSlot(ctx)
-}
-
-func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) {
- release, err := r.rateLimiter.ReadRequest(ctx)
- return common.ReleaseFunc(release), err
-}
-
-func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) {
- release, err := r.rateLimiter.WriteRequest(ctx)
- return common.ReleaseFunc(release), err
-}
diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go
deleted file mode 100644
index d90343265..000000000
--- a/pkg/local_object_storage/shard/refill_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package shard
-
-import (
- "context"
- "os"
- "testing"
-
- shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/stretchr/testify/require"
-)
-
-func BenchmarkRefillMetabase(b *testing.B) {
- b.Run("100 objects", func(b *testing.B) {
- benchRefillMetabase(b, 100)
- })
-
- b.Run("1000 objects", func(b *testing.B) {
- benchRefillMetabase(b, 1000)
- })
-
- b.Run("2000 objects", func(b *testing.B) {
- benchRefillMetabase(b, 2000)
- })
-
- b.Run("5000 objects", func(b *testing.B) {
- benchRefillMetabase(b, 5000)
- })
-}
-
-func benchRefillMetabase(b *testing.B, objectsCount int) {
- sh := newCustomShard(b, false, shardOptions{
- additionalShardOptions: []Option{WithRefillMetabaseWorkersCount(shardconfig.RefillMetabaseWorkersCountDefault)},
- })
-
- defer func() { require.NoError(b, sh.Close(context.Background())) }()
-
- var putPrm PutPrm
-
- for range objectsCount / 2 {
- obj := testutil.GenerateObject()
- testutil.AddAttribute(obj, "foo", "bar")
- testutil.AddPayload(obj, 1<<5) // blobvnicza tree obj
-
- putPrm.SetObject(obj)
-
- _, err := sh.Put(context.Background(), putPrm)
- require.NoError(b, err)
- }
-
- for range objectsCount / 2 {
- obj := testutil.GenerateObject()
- testutil.AddAttribute(obj, "foo", "bar")
- obj.SetID(oidtest.ID())
- testutil.AddPayload(obj, 1<<20) // fstree obj
-
- putPrm.SetObject(obj)
-
- _, err := sh.Put(context.Background(), putPrm)
- require.NoError(b, err)
- }
-
- require.NoError(b, sh.Close(context.Background()))
- require.NoError(b, os.Remove(sh.metaBase.DumpInfo().Path))
-
- require.NoError(b, sh.Open(context.Background()))
- sh.cfg.refillMetabase = true
-
- b.ReportAllocs()
- b.ResetTimer()
-
- require.NoError(b, sh.Init(context.Background()))
-
- require.NoError(b, sh.Close(context.Background()))
-}
diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go
index e563f390b..9cfa267e8 100644
--- a/pkg/local_object_storage/shard/reload_test.go
+++ b/pkg/local_object_storage/shard/reload_test.go
@@ -27,7 +27,7 @@ func TestShardReload(t *testing.T) {
p := t.Name()
defer os.RemoveAll(p)
- l := test.NewLogger(t)
+ l := test.NewLogger(t, true)
blobOpts := []blobstor.Option{
blobstor.WithLogger(l),
blobstor.WithStorages([]blobstor.SubStorage{
@@ -41,8 +41,7 @@ func TestShardReload(t *testing.T) {
metaOpts := []meta.Option{
meta.WithPath(filepath.Join(p, "meta")),
- meta.WithEpochState(epochState{}),
- }
+ meta.WithEpochState(epochState{})}
opts := []Option{
WithID(NewIDFromBytes([]byte{})),
@@ -50,18 +49,12 @@ func TestShardReload(t *testing.T) {
WithBlobStorOptions(blobOpts...),
WithMetaBaseOptions(metaOpts...),
WithPiloramaOptions(
- pilorama.WithPath(filepath.Join(p, "pilorama"))),
- WithMetricsWriter(newMetricStore()),
- }
+ pilorama.WithPath(filepath.Join(p, "pilorama")))}
sh := New(opts...)
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
- defer func() {
- require.NoError(t, sh.Close(context.Background()))
- }()
-
objects := make([]objAddr, 5)
for i := range objects {
objects[i].obj = newObject()
@@ -72,7 +65,7 @@ func TestShardReload(t *testing.T) {
checkHasObjects := func(t *testing.T, exists bool) {
for i := range objects {
var prm ExistsPrm
- prm.Address = objects[i].addr
+ prm.SetAddress(objects[i].addr)
res, err := sh.Exists(context.Background(), prm)
require.NoError(t, err)
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index fbc751e26..1615f5fbe 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -15,9 +15,8 @@ import (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
- isIndexedContainer bool
+ cnr cid.ID
+ filters objectSDK.SearchFilters
}
// SelectRes groups the resulting values of Select operation.
@@ -26,9 +25,8 @@ type SelectRes struct {
}
// SetContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) SetContainerID(cnr cid.ID, isIndexedContainer bool) {
+func (p *SelectPrm) SetContainerID(cnr cid.ID) {
p.cnr = cnr
- p.isIndexedContainer = isIndexedContainer
}
// SetFilters is a Select option to set the object filters.
@@ -60,20 +58,13 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
return SelectRes{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return SelectRes{}, nil
- }
- defer release()
-
var selectPrm meta.SelectPrm
selectPrm.SetFilters(prm.filters)
selectPrm.SetContainerID(prm.cnr)
- selectPrm.SetUseAttributeIndex(prm.isIndexedContainer)
mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
- return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err)
+ return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
}
return SelectRes{
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index d89b56266..00f4fbb9e 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -2,21 +2,22 @@ package shard
import (
"context"
+ "fmt"
"sync"
"sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
@@ -37,11 +38,8 @@ type Shard struct {
tsSource TombstoneSource
- rb *rebuilder
-
- gcCancel atomic.Value
- setModeRequested atomic.Bool
- writecacheSealCancel atomic.Pointer[writecacheSealCanceler]
+ gcCancel atomic.Value
+ setModeRequested atomic.Bool
}
// Option represents Shard's constructor option.
@@ -56,14 +54,43 @@ type ExpiredObjectsCallback func(context.Context, uint64, []oid.Address)
// DeletedLockCallback is a callback handling list of deleted LOCK objects.
type DeletedLockCallback func(context.Context, []oid.Address)
-// EmptyContainersCallback is a callback hanfling list of zero-size and zero-count containers.
-type EmptyContainersCallback func(context.Context, []cid.ID)
+// MetricsWriter is an interface that must store shard's metrics.
+type MetricsWriter interface {
+ // SetObjectCounter must set object counter taking into account object type.
+ SetObjectCounter(objectType string, v uint64)
+ // AddToObjectCounter must update object counter taking into account object
+ // type.
+ // Negative parameter must decrease the counter.
+ AddToObjectCounter(objectType string, delta int)
+ // AddToContainerSize must add a value to the container size.
+ // Value can be negative.
+ AddToContainerSize(cnr string, value int64)
+ // AddToPayloadSize must add a value to the payload size.
+ // Value can be negative.
+ AddToPayloadSize(value int64)
+ // IncObjectCounter must increment shard's object counter taking into account
+ // object type.
+ IncObjectCounter(objectType string)
+ // DecObjectCounter must decrement shard's object counter taking into account
+ // object type.
+ DecObjectCounter(objectType string)
+ // SetShardID must set (update) the shard identifier that will be used in
+ // metrics.
+ SetShardID(id string)
+ // SetReadonly must set shard mode.
+ SetMode(mode mode.Mode)
+ // IncErrorCounter increment error counter.
+ IncErrorCounter()
+ // ClearErrorCounter clear error counter.
+ ClearErrorCounter()
+ // DeleteShardMetrics deletes shard metrics from registry.
+ DeleteShardMetrics()
+}
type cfg struct {
m sync.RWMutex
- refillMetabase bool
- refillMetabaseWorkersCount int
+ refillMetabase bool
rmBatchSize int
@@ -75,7 +102,7 @@ type cfg struct {
metaOpts []meta.Option
- writeCacheOpts []writecache.Option
+ writeCacheOpts writecacheconfig.Options
piloramaOpts []pilorama.Option
@@ -89,30 +116,19 @@ type cfg struct {
deletedLockCallBack DeletedLockCallback
- zeroSizeContainersCallback EmptyContainersCallback
- zeroCountContainersCallback EmptyContainersCallback
-
tsSource TombstoneSource
metricsWriter MetricsWriter
- reportErrorFunc func(ctx context.Context, selfID string, message string, err error)
-
- containerInfo container.InfoProvider
-
- opsLimiter qos.Limiter
+ reportErrorFunc func(selfID string, message string, err error)
}
func defaultCfg() *cfg {
return &cfg{
- rmBatchSize: 100,
- log: logger.NewLoggerWrapper(zap.L()),
- gcCfg: defaultGCCfg(),
- reportErrorFunc: func(context.Context, string, string, error) {},
- zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
- zeroCountContainersCallback: func(context.Context, []cid.ID) {},
- metricsWriter: noopMetrics{},
- opsLimiter: qos.NewNoopLimiter(),
+ rmBatchSize: 100,
+ log: &logger.Logger{Logger: zap.L()},
+ gcCfg: defaultGCCfg(),
+ reportErrorFunc: func(string, string, error) {},
}
}
@@ -134,19 +150,29 @@ func New(opts ...Option) *Shard {
tsSource: c.tsSource,
}
- reportFunc := func(ctx context.Context, msg string, err error) {
- s.reportErrorFunc(ctx, s.ID().String(), msg, err)
+ reportFunc := func(msg string, err error) {
+ s.reportErrorFunc(s.ID().String(), msg, err)
}
s.blobStor.SetReportErrorFunc(reportFunc)
if c.useWriteCache {
- s.writeCache = writecache.New(
- append(c.writeCacheOpts,
- writecache.WithReportErrorFunc(reportFunc),
- writecache.WithBlobstor(bs),
- writecache.WithMetabase(mb))...)
- s.writeCache.GetMetrics().SetPath(s.writeCache.DumpInfo().Path)
+ switch c.writeCacheOpts.Type {
+ case writecacheconfig.TypeBBolt:
+ s.writeCache = writecachebbolt.New(
+ append(c.writeCacheOpts.BBoltOptions,
+ writecachebbolt.WithReportErrorFunc(reportFunc),
+ writecachebbolt.WithBlobstor(bs),
+ writecachebbolt.WithMetabase(mb))...)
+ case writecacheconfig.TypeBadger:
+ s.writeCache = writecachebadger.New(
+ append(c.writeCacheOpts.BadgerOptions,
+ writecachebadger.WithReportErrorFunc(reportFunc),
+ writecachebadger.WithBlobstor(bs),
+ writecachebadger.WithMetabase(mb))...)
+ default:
+ panic(fmt.Sprintf("invalid writecache type: %v", c.writeCacheOpts.Type))
+ }
}
if s.piloramaOpts != nil {
@@ -154,7 +180,6 @@ func New(opts ...Option) *Shard {
}
s.fillInfo()
- s.writecacheSealCancel.Store(notInitializedCancel)
return s
}
@@ -181,7 +206,7 @@ func WithMetaBaseOptions(opts ...meta.Option) Option {
}
// WithWriteCacheOptions returns option to set internal write cache options.
-func WithWriteCacheOptions(opts []writecache.Option) Option {
+func WithWriteCacheOptions(opts writecacheconfig.Options) Option {
return func(c *cfg) {
c.writeCacheOpts = opts
}
@@ -190,7 +215,12 @@ func WithWriteCacheOptions(opts []writecache.Option) Option {
// WithWriteCacheMetrics returns an option to set the metrics register used by the write cache.
func WithWriteCacheMetrics(wcMetrics writecache.Metrics) Option {
return func(c *cfg) {
- c.writeCacheOpts = append(c.writeCacheOpts, writecache.WithMetrics(wcMetrics))
+ switch c.writeCacheOpts.Type {
+ case writecacheconfig.TypeBBolt:
+ c.writeCacheOpts.BBoltOptions = append(c.writeCacheOpts.BBoltOptions, writecachebbolt.WithMetrics(wcMetrics))
+ case writecacheconfig.TypeBadger:
+ c.writeCacheOpts.BadgerOptions = append(c.writeCacheOpts.BadgerOptions, writecachebadger.WithMetrics(wcMetrics))
+ }
}
}
@@ -205,7 +235,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option {
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = l
- c.gcCfg.log = l.WithTag(logger.TagGC)
+ c.gcCfg.log = l
}
}
@@ -218,7 +248,7 @@ func WithWriteCache(use bool) Option {
// hasWriteCache returns bool if write cache exists on shards.
func (s *Shard) hasWriteCache() bool {
- return s.useWriteCache
+ return s.cfg.useWriteCache
}
// NeedRefillMetabase returns true if metabase is needed to be refilled.
@@ -273,13 +303,6 @@ func WithRefillMetabase(v bool) Option {
}
}
-// WithRefillMetabaseWorkersCount returns option to set count of workers to refill the Metabase on Shard's initialization step.
-func WithRefillMetabaseWorkersCount(v int) Option {
- return func(c *cfg) {
- c.refillMetabaseWorkersCount = v
- }
-}
-
// WithMode returns option to set shard's mode. Mode must be one of the predefined:
// - mode.ReadWrite;
// - mode.ReadOnly.
@@ -321,7 +344,7 @@ func WithGCMetrics(v GCMectrics) Option {
// WithReportErrorFunc returns option to specify callback for handling storage-related errors
// in the background workers.
-func WithReportErrorFunc(f func(ctx context.Context, selfID string, message string, err error)) Option {
+func WithReportErrorFunc(f func(selfID string, message string, err error)) Option {
return func(c *cfg) {
c.reportErrorFunc = f
}
@@ -335,59 +358,24 @@ func WithExpiredCollectorBatchSize(size int) Option {
}
}
-// WithExpiredCollectorWorkerCount returns option to set concurrent
+// WithExpiredCollectorWorkersCount returns option to set concurrent
// workers count of expired object collection operation.
-func WithExpiredCollectorWorkerCount(count int) Option {
+func WithExpiredCollectorWorkersCount(count int) Option {
return func(c *cfg) {
- c.gcCfg.expiredCollectorWorkerCount = count
- }
-}
-
-// WithDisabledGC disables GC.
-// For testing purposes only.
-func WithDisabledGC() Option {
- return func(c *cfg) {
- c.gcCfg.testHookRemover = func(_ context.Context) gcRunResult { return gcRunResult{} }
- }
-}
-
-// WithZeroSizeCallback returns option to set zero-size containers callback.
-func WithZeroSizeCallback(cb EmptyContainersCallback) Option {
- return func(c *cfg) {
- c.zeroSizeContainersCallback = cb
- }
-}
-
-// WithZeroCountCallback returns option to set zero-count containers callback.
-func WithZeroCountCallback(cb EmptyContainersCallback) Option {
- return func(c *cfg) {
- c.zeroCountContainersCallback = cb
- }
-}
-
-// WithContainerInfoProvider returns option to set container info provider.
-func WithContainerInfoProvider(containerInfo container.InfoProvider) Option {
- return func(c *cfg) {
- c.containerInfo = containerInfo
- }
-}
-
-func WithLimiter(l qos.Limiter) Option {
- return func(c *cfg) {
- c.opsLimiter = l
+ c.gcCfg.expiredCollectorWorkersCount = count
}
}
func (s *Shard) fillInfo() {
- s.info.MetaBaseInfo = s.metaBase.DumpInfo()
- s.info.BlobStorInfo = s.blobStor.DumpInfo()
- s.info.Mode = s.GetMode()
+ s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
+ s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
+ s.cfg.info.Mode = s.GetMode()
- if s.useWriteCache {
- s.info.WriteCacheInfo = s.writeCache.DumpInfo()
+ if s.cfg.useWriteCache {
+ s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo()
}
if s.pilorama != nil {
- s.info.PiloramaInfo = s.pilorama.DumpInfo()
+ s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo()
}
}
@@ -400,123 +388,87 @@ const (
// counter type (excludes objects that are
// stored but unavailable).
logical = "logic"
- // user is an available small or big regular object.
- user = "user"
)
func (s *Shard) updateMetrics(ctx context.Context) {
- if s.GetMode().NoMetabase() {
- return
- }
-
- cc, err := s.metaBase.ObjectCounters()
- if err != nil {
- s.log.Warn(ctx, logs.ShardMetaObjectCounterRead,
- zap.Error(err),
- )
-
- return
- }
-
- s.setObjectCounterBy(physical, cc.Phy)
- s.setObjectCounterBy(logical, cc.Logic)
- s.setObjectCounterBy(user, cc.User)
-
- cnrList, err := s.metaBase.Containers(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err))
- return
- }
-
- var totalPayload uint64
-
- for i := range cnrList {
- size, err := s.metaBase.ContainerSize(cnrList[i])
+ if s.cfg.metricsWriter != nil && !s.GetMode().NoMetabase() {
+ cc, err := s.metaBase.ObjectCounters()
if err != nil {
- s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize,
- zap.String("cid", cnrList[i].EncodeToString()),
- zap.Error(err))
- continue
+ s.log.Warn(logs.ShardMetaObjectCounterRead,
+ zap.Error(err),
+ )
+
+ return
}
- s.addToContainerSize(cnrList[i].EncodeToString(), int64(size))
- totalPayload += size
- }
- s.addToPayloadSize(int64(totalPayload))
+ s.cfg.metricsWriter.SetObjectCounter(physical, cc.Phy())
+ s.cfg.metricsWriter.SetObjectCounter(logical, cc.Logic())
- contCount, err := s.metaBase.ContainerCounters(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err))
- return
+ cnrList, err := s.metaBase.Containers(ctx)
+ if err != nil {
+ s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err))
+ return
+ }
+
+ var totalPayload uint64
+
+ for i := range cnrList {
+ size, err := s.metaBase.ContainerSize(cnrList[i])
+ if err != nil {
+ s.log.Warn(logs.ShardMetaCantReadContainerSize,
+ zap.String("cid", cnrList[i].EncodeToString()),
+ zap.Error(err))
+ continue
+ }
+ s.metricsWriter.AddToContainerSize(cnrList[i].EncodeToString(), int64(size))
+ totalPayload += size
+ }
+
+ s.metricsWriter.AddToPayloadSize(int64(totalPayload))
}
- for contID, count := range contCount.Counts {
- s.setContainerObjectsCount(contID.EncodeToString(), physical, count.Phy)
- s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic)
- s.setContainerObjectsCount(contID.EncodeToString(), user, count.User)
- }
- s.metricsWriter.SetMode(s.info.Mode)
}
// incObjectCounter increment both physical and logical object
// counters.
-func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
- s.metricsWriter.IncObjectCounter(physical)
- s.metricsWriter.IncObjectCounter(logical)
- s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
- s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
- if isUser {
- s.metricsWriter.IncObjectCounter(user)
- s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
+func (s *Shard) incObjectCounter() {
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.IncObjectCounter(physical)
+ s.cfg.metricsWriter.IncObjectCounter(logical)
}
}
func (s *Shard) decObjectCounterBy(typ string, v uint64) {
- if v > 0 {
- s.metricsWriter.AddToObjectCounter(typ, -int(v))
- }
-}
-
-func (s *Shard) setObjectCounterBy(typ string, v uint64) {
- if v > 0 {
- s.metricsWriter.SetObjectCounter(typ, v)
- }
-}
-
-func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
- for cnrID, count := range byCnr {
- if count.Phy > 0 {
- s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
- }
- if count.Logic > 0 {
- s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
- }
- if count.User > 0 {
- s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
- }
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
}
}
func (s *Shard) addToContainerSize(cnr string, size int64) {
- if size != 0 {
- s.metricsWriter.AddToContainerSize(cnr, size)
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.AddToContainerSize(cnr, size)
}
}
func (s *Shard) addToPayloadSize(size int64) {
- if size != 0 {
- s.metricsWriter.AddToPayloadSize(size)
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.AddToPayloadSize(size)
}
}
-func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) {
- if v > 0 {
- s.metricsWriter.SetContainerObjectsCount(cnr, typ, v)
+func (s *Shard) IncErrorCounter() {
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.IncErrorCounter()
}
}
-func (s *Shard) SetEvacuationInProgress(val bool) {
- s.m.Lock()
- defer s.m.Unlock()
- s.info.EvacuationInProgress = val
- s.metricsWriter.SetEvacuationInProgress(val)
+func (s *Shard) ClearErrorCounter() {
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.ClearErrorCounter()
+ }
+}
+
+func (s *Shard) DeleteShardMetrics() {
+ if s.cfg.metricsWriter != nil {
+ s.cfg.metricsWriter.DeleteShardMetrics()
+ }
}
diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go
index 84be71c4d..9da9eb6b8 100644
--- a/pkg/local_object_storage/shard/shard_test.go
+++ b/pkg/local_object_storage/shard/shard_test.go
@@ -11,7 +11,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -30,7 +32,8 @@ func (s epochState) CurrentEpoch() uint64 {
type shardOptions struct {
rootPath string
- wcOpts []writecache.Option
+ dontRelease bool
+ wcOpts writecacheconfig.Options
bsOpts []blobstor.Option
metaOptions []meta.Option
@@ -45,23 +48,31 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
if o.rootPath == "" {
o.rootPath = t.TempDir()
}
+ if enableWriteCache && o.wcOpts.Type == 0 {
+ o.wcOpts.Type = writecacheconfig.TypeBBolt
+ }
var sh *Shard
if enableWriteCache {
- o.wcOpts = append(
- []writecache.Option{writecache.WithPath(filepath.Join(o.rootPath, "wcache"))},
- o.wcOpts...)
+ switch o.wcOpts.Type {
+ case writecacheconfig.TypeBBolt:
+ o.wcOpts.BBoltOptions = append(
+ []writecachebbolt.Option{writecachebbolt.WithPath(filepath.Join(o.rootPath, "wcache"))},
+ o.wcOpts.BBoltOptions...)
+ case writecacheconfig.TypeBadger:
+ o.wcOpts.BadgerOptions = append(
+ []writecachebadger.Option{writecachebadger.WithPath(filepath.Join(o.rootPath, "wcache"))},
+ o.wcOpts.BadgerOptions...)
+ }
}
if o.bsOpts == nil {
o.bsOpts = []blobstor.Option{
- blobstor.WithLogger(test.NewLogger(t)),
+ blobstor.WithLogger(test.NewLogger(t, true)),
blobstor.WithStorages([]blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
- context.Background(),
- blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
- blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)),
+ blobovniczatree.WithLogger(test.NewLogger(t, true)),
blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -79,19 +90,18 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
opts := []Option{
WithID(NewIDFromBytes([]byte{})),
- WithLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t, true)),
WithBlobStorOptions(o.bsOpts...),
WithMetaBaseOptions(
append([]meta.Option{
- meta.WithPath(filepath.Join(o.rootPath, "meta")), meta.WithEpochState(epochState{}),
- },
+ meta.WithPath(filepath.Join(o.rootPath, "meta")), meta.WithEpochState(epochState{})},
o.metaOptions...)...,
),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(o.rootPath, "pilorama"))),
WithWriteCache(enableWriteCache),
WithWriteCacheOptions(o.wcOpts),
- WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(ctx, addresses)
+ WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
@@ -110,5 +120,13 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
+ if !o.dontRelease {
+ t.Cleanup(func() { releaseShard(sh, t) })
+ }
+
return sh
}
+
+func releaseShard(s *Shard, t testing.TB) {
+ require.NoError(t, s.Close())
+}
diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go
index b1232707f..163c3a4ae 100644
--- a/pkg/local_object_storage/shard/shutdown_test.go
+++ b/pkg/local_object_storage/shard/shutdown_test.go
@@ -7,7 +7,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
@@ -35,11 +36,14 @@ func TestWriteCacheObjectLoss(t *testing.T) {
}
dir := t.TempDir()
- wcOpts := []writecache.Option{
- writecache.WithMaxObjectSize(smallSize * 2),
+ wcOpts := writecacheconfig.Options{
+ Type: writecacheconfig.TypeBBolt,
+ BBoltOptions: []writecachebbolt.Option{
+ writecachebbolt.WithMaxObjectSize(smallSize * 2),
+ },
}
- sh := newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
+ sh := newCustomShard(t, true, shardOptions{dontRelease: true, rootPath: dir, wcOpts: wcOpts})
var errG errgroup.Group
for i := range objects {
@@ -52,10 +56,9 @@ func TestWriteCacheObjectLoss(t *testing.T) {
})
}
require.NoError(t, errG.Wait())
- require.NoError(t, sh.Close(context.Background()))
+ require.NoError(t, sh.Close())
sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
- defer func() { require.NoError(t, sh.Close(context.Background())) }()
var getPrm GetPrm
diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go
index db361a8bd..7795b820d 100644
--- a/pkg/local_object_storage/shard/tree.go
+++ b/pkg/local_object_storage/shard/tree.go
@@ -43,11 +43,6 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeMove(ctx, d, treeID, m)
}
@@ -80,11 +75,6 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta)
}
@@ -113,46 +103,9 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync)
}
-// TreeApplyBatch implements the pilorama.Forest interface.
-func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyBatch",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.String("container_id", cnr.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- if s.pilorama == nil {
- return ErrPiloramaDisabled
- }
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.ReadOnly() {
- return ErrReadOnlyMode
- }
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
- return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m)
-}
-
// TreeGetByPath implements the pilorama.Forest interface.
func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath",
@@ -177,11 +130,6 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string,
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
}
@@ -207,11 +155,6 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n
if s.info.Mode.NoMetabase() {
return pilorama.Meta{}, 0, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return pilorama.Meta{}, 0, err
- }
- defer release()
return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID)
}
@@ -237,43 +180,9 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID)
}
-// TreeSortedByFilename implements the pilorama.Forest interface.
-func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.String("container_id", cid.EncodeToString()),
- attribute.String("tree_id", treeID),
- ),
- )
- defer span.End()
-
- if s.pilorama == nil {
- return nil, last, ErrPiloramaDisabled
- }
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return nil, last, ErrDegradedMode
- }
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, last, err
- }
- defer release()
- return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
-}
-
// TreeGetOpLog implements the pilorama.Forest interface.
func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetOpLog",
@@ -296,11 +205,6 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string,
if s.info.Mode.NoMetabase() {
return pilorama.Move{}, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return pilorama.Move{}, err
- }
- defer release()
return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height)
}
@@ -325,11 +229,6 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeDrop(ctx, cid, treeID)
}
@@ -353,11 +252,6 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
return s.pilorama.TreeList(ctx, cid)
}
@@ -381,11 +275,6 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u
if s.pilorama == nil {
return 0, ErrPiloramaDisabled
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
return s.pilorama.TreeHeight(ctx, cid, treeID)
}
@@ -410,11 +299,6 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b
if s.info.Mode.NoMetabase() {
return false, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return false, err
- }
- defer release()
return s.pilorama.TreeExists(ctx, cid, treeID)
}
@@ -443,11 +327,6 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
}
@@ -472,70 +351,5 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st
if s.info.Mode.NoMetabase() {
return 0, ErrDegradedMode
}
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return 0, err
- }
- defer release()
return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID)
}
-
-func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm) (*pilorama.TreeListTreesResult, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeListTrees",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- ),
- )
- defer span.End()
-
- if s.pilorama == nil {
- return nil, ErrPiloramaDisabled
- }
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.NoMetabase() {
- return nil, ErrDegradedMode
- }
- release, err := s.opsLimiter.ReadRequest(ctx)
- if err != nil {
- return nil, err
- }
- defer release()
- return s.pilorama.TreeListTrees(ctx, prm)
-}
-
-func (s *Shard) PiloramaEnabled() bool {
- return s.pilorama != nil
-}
-
-func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *pilorama.Move) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyStream",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.String("container_id", cnr.EncodeToString()),
- attribute.String("tree_id", treeID)),
- )
- defer span.End()
-
- if s.pilorama == nil {
- return ErrPiloramaDisabled
- }
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.ReadOnly() {
- return ErrReadOnlyMode
- }
- if s.info.Mode.NoMetabase() {
- return ErrDegradedMode
- }
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
- return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source)
-}
diff --git a/pkg/local_object_storage/shard/weight.go b/pkg/local_object_storage/shard/weight.go
new file mode 100644
index 000000000..0ab5ad61d
--- /dev/null
+++ b/pkg/local_object_storage/shard/weight.go
@@ -0,0 +1,12 @@
+package shard
+
+// WeightValues groups values of Shard weight parameters.
+type WeightValues struct {
+ // Amount of free disk space. Measured in kilobytes.
+ FreeSpace uint64
+}
+
+// WeightValues returns current weight values of the Shard.
+func (s *Shard) WeightValues() WeightValues {
+ return s.info.WeightValues
+}
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index 9edb89df8..7ce279c54 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -4,28 +4,14 @@ import (
"context"
"errors"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
)
-var (
- dummyCancel = &writecacheSealCanceler{cancel: func() {}}
- notInitializedCancel = &writecacheSealCanceler{cancel: func() {}}
- errWriteCacheSealing = errors.New("writecache is already sealing or shard is not initialized")
-)
-
-type writecacheSealCanceler struct {
- cancel context.CancelFunc
-}
-
// FlushWriteCachePrm represents parameters of a `FlushWriteCache` operation.
type FlushWriteCachePrm struct {
ignoreErrors bool
- seal bool
}
// SetIgnoreErrors sets the flag to ignore read-errors during flush.
@@ -33,11 +19,6 @@ func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) {
p.ignoreErrors = ignore
}
-// SetSeal sets the flag to left writecache in read-only mode after flush.
-func (p *FlushWriteCachePrm) SetSeal(v bool) {
- p.seal = v
-}
-
// errWriteCacheDisabled is returned when an operation on write-cache is performed,
// but write-cache is disabled.
var errWriteCacheDisabled = errors.New("write-cache is disabled")
@@ -48,7 +29,6 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.Bool("ignore_errors", p.ignoreErrors),
- attribute.Bool("seal", p.seal),
))
defer span.End()
@@ -67,95 +47,5 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error
return ErrDegradedMode
}
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal)
-}
-
-type SealWriteCachePrm struct {
- IgnoreErrors bool
- Async bool
- RestoreMode bool
- Shrink bool
-}
-
-// SealWriteCache flushes all data from the write-cache and moves it to degraded read only mode.
-func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.SealWriteCache",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.Bool("ignore_errors", p.IgnoreErrors),
- attribute.Bool("restore_mode", p.RestoreMode),
- ))
- defer span.End()
-
- if !s.hasWriteCache() {
- return errWriteCacheDisabled
- }
-
- if p.Async {
- ctx = context.WithoutCancel(ctx)
- }
- ctx, cancel := context.WithCancel(ctx)
- canceler := &writecacheSealCanceler{cancel: cancel}
- if !s.writecacheSealCancel.CompareAndSwap(dummyCancel, canceler) {
- return errWriteCacheSealing
- }
- s.m.RLock()
- cleanup := func() {
- s.m.RUnlock()
- s.writecacheSealCancel.Store(dummyCancel)
- }
-
- if s.info.Mode.ReadOnly() {
- cleanup()
- return ErrReadOnlyMode
- }
- if s.info.Mode.NoMetabase() {
- cleanup()
- return ErrDegradedMode
- }
-
- if !p.Async {
- defer cleanup()
- }
- prm := writecache.SealPrm{IgnoreErrors: p.IgnoreErrors, RestoreMode: p.RestoreMode, Shrink: p.Shrink}
- if p.Async {
- started := make(chan struct{})
- go func() {
- close(started)
- defer cleanup()
-
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
- return
- }
- defer release()
-
- s.log.Info(ctx, logs.StartedWritecacheSealAsync)
- if err := s.writeCache.Seal(ctx, prm); err != nil {
- s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
- return
- }
- s.log.Info(ctx, logs.WritecacheSealCompletedAsync)
- }()
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-started:
- return nil
- }
- }
- release, err := s.opsLimiter.WriteRequest(ctx)
- if err != nil {
- return err
- }
- defer release()
-
- return s.writeCache.Seal(ctx, prm)
+ return s.writeCache.Flush(ctx, p.ignoreErrors)
}
diff --git a/pkg/local_object_storage/util/ecinfo.go b/pkg/local_object_storage/util/ecinfo.go
deleted file mode 100644
index a92fbceea..000000000
--- a/pkg/local_object_storage/util/ecinfo.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package util
-
-import (
- "bytes"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-)
-
-// MergeECInfo ignores conflicts and rewrites `to` with non empty values
-// from `from`.
-func MergeECInfo(from, to *objectSDK.ECInfo) *objectSDK.ECInfo {
- for _, fchunk := range from.Chunks {
- add := true
- for _, tchunk := range to.Chunks {
- if bytes.Equal(tchunk.ID.GetValue(), fchunk.ID.GetValue()) {
- add = false
- break
- }
- }
- if add {
- to.AddChunk(*objectSDK.NewECChunkFromV2(&fchunk))
- }
- }
- return to
-}
diff --git a/pkg/local_object_storage/util/ecinfo_test.go b/pkg/local_object_storage/util/ecinfo_test.go
deleted file mode 100644
index 081006088..000000000
--- a/pkg/local_object_storage/util/ecinfo_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package util
-
-import (
- "crypto/rand"
- "testing"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/stretchr/testify/require"
-)
-
-func TestMergeECInfo(t *testing.T) {
- id := generateV2ID()
- target := objectSDK.NewECInfo()
- var chunk objectSDK.ECChunk
- chunk.Total = 2
- chunk.Index = 0
- chunk.SetID(id)
- target.AddChunk(chunk)
-
- t.Run("merge empty", func(t *testing.T) {
- to := objectSDK.NewECInfo()
-
- result := MergeECInfo(target, to)
- require.Equal(t, result, target)
- })
-
- t.Run("merge existed", func(t *testing.T) {
- to := objectSDK.NewECInfo()
- to.AddChunk(chunk)
-
- result := MergeECInfo(target, to)
- require.Equal(t, result, target)
- })
- t.Run("merge extend", func(t *testing.T) {
- to := objectSDK.NewECInfo()
- var chunk objectSDK.ECChunk
- chunk.Total = 2
- chunk.Index = 1
- chunk.SetID(generateV2ID())
- to.AddChunk(chunk)
-
- result := MergeECInfo(target, to)
- require.Equal(t, len(result.Chunks), 2)
- })
-}
-
-func generateV2ID() oid.ID {
- var buf [32]byte
- _, _ = rand.Read(buf[:])
-
- var id oid.ID
- _ = id.Decode(buf[:])
-
- return id
-}
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index fd85b4501..16c6d73b2 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -2,59 +2,47 @@ package benchmark
import (
"context"
- "fmt"
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"github.com/stretchr/testify/require"
)
func BenchmarkWritecacheSeq(b *testing.B) {
const payloadSize = 8 << 10
b.Run("bbolt_seq", func(b *testing.B) {
- benchmarkPutSeq(b, newCache(b), payloadSize)
+ benchmarkPutSeq(b, newBBoltCache(b), payloadSize)
+ })
+ b.Run("badger_seq", func(b *testing.B) {
+ benchmarkPutSeq(b, newBadgerCache(b), payloadSize)
})
}
func BenchmarkWritecachePar(b *testing.B) {
const payloadSize = 8 << 10
b.Run("bbolt_par", func(b *testing.B) {
- benchmarkPutPar(b, newCache(b), payloadSize)
+ benchmarkPutPar(b, newBBoltCache(b), payloadSize)
})
-}
-
-func BenchmarkWriteAfterDelete(b *testing.B) {
- const payloadSize = 32 << 10
- const parallel = 25
-
- cache := newCache(b)
- benchmarkPutPrepare(b, cache)
- b.Run(fmt.Sprintf("%dB_before", payloadSize), func(b *testing.B) {
- b.SetParallelism(parallel)
- benchmarkRunPar(b, cache, payloadSize)
+ b.Run("badger_par", func(b *testing.B) {
+ benchmarkPutPar(b, newBadgerCache(b), payloadSize)
})
- require.NoError(b, cache.Flush(context.Background(), false, false))
- b.Run(fmt.Sprintf("%dB_after", payloadSize), func(b *testing.B) {
- b.SetParallelism(parallel)
- benchmarkRunPar(b, cache, payloadSize)
- })
- require.NoError(b, cache.Close(context.Background()))
}
func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close(context.Background())) }()
ctx := context.Background()
objGen := testutil.RandObjGenerator{ObjSize: size}
b.ResetTimer()
- for range b.N {
+ for n := 0; n < b.N; n++ {
obj := objGen.Next()
rawData, err := obj.Marshal()
require.NoError(b, err, "marshaling object")
@@ -71,12 +59,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close(context.Background())) }()
- benchmarkRunPar(b, cache, size)
-}
-
-func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) {
ctx := context.Background()
b.ResetTimer()
@@ -99,24 +82,41 @@ func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) {
}
func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
- require.NoError(b, cache.Open(context.Background(), mode.ReadWrite), "opening")
- require.NoError(b, cache.Init(context.Background()), "initializing")
+ require.NoError(b, cache.Open(context.Background(), false), "opening")
+ require.NoError(b, cache.Init(), "initializing")
+ b.Cleanup(func() {
+ require.NoError(b, cache.Close(), "closing")
+ })
}
type testMetabase struct{}
-func (testMetabase) UpdateStorageID(context.Context, meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error) {
+func (testMetabase) UpdateStorageID(meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error) {
return meta.UpdateStorageIDRes{}, nil
}
-func newCache(b *testing.B) writecache.Cache {
+func newBBoltCache(b *testing.B) writecache.Cache {
bs := teststore.New(
teststore.WithPut(func(pp common.PutPrm) (common.PutRes, error) { return common.PutRes{}, nil }),
)
- return writecache.New(
- writecache.WithPath(b.TempDir()),
- writecache.WithBlobstor(bs),
- writecache.WithMetabase(testMetabase{}),
- writecache.WithMaxCacheSize(256<<30),
+ return writecachebbolt.New(
+ writecachebbolt.WithPath(b.TempDir()),
+ writecachebbolt.WithBlobstor(bs),
+ writecachebbolt.WithMetabase(testMetabase{}),
+ writecachebbolt.WithMaxCacheSize(256<<30),
+ writecachebbolt.WithSmallObjectSize(128<<10),
+ )
+}
+
+func newBadgerCache(b *testing.B) writecache.Cache {
+ bs := teststore.New(
+ teststore.WithPut(func(pp common.PutPrm) (common.PutRes, error) { return common.PutRes{}, nil }),
+ )
+ return writecachebadger.New(
+ writecachebadger.WithPath(b.TempDir()),
+ writecachebadger.WithBlobstor(bs),
+ writecachebadger.WithMetabase(testMetabase{}),
+ writecachebadger.WithMaxCacheSize(256<<30),
+ writecachebadger.WithGCInterval(10*time.Second),
)
}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
deleted file mode 100644
index ee709ea73..000000000
--- a/pkg/local_object_storage/writecache/cache.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package writecache
-
-import (
- "context"
- "fmt"
- "sync"
- "sync/atomic"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-type cache struct {
- options
-
- mode mode.Mode
- modeMtx sync.RWMutex
-
- // flushCh is a channel with objects to flush.
- flushCh chan objectInfo
- // cancel is cancel function, protected by modeMtx in Close.
- cancel atomic.Value
- // wg is a wait group for flush workers.
- wg sync.WaitGroup
- // fsTree contains big files stored directly on file-system.
- fsTree *fstree.FSTree
- // counter contains atomic counters for the number of objects stored in cache.
- counter *fstree.SimpleCounter
-}
-
-// wcStorageType is used for write-cache operations logging.
-const wcStorageType = "write-cache"
-
-type objectInfo struct {
- addr oid.Address
- size uint64
-}
-
-const (
- defaultMaxObjectSize = 64 * 1024 * 1024 // 64 MiB
- defaultMaxCacheSize = 1 << 30 // 1 GiB
-)
-
-var dummyCanceler context.CancelFunc = func() {}
-
-// New creates new writecache instance.
-func New(opts ...Option) Cache {
- c := &cache{
- flushCh: make(chan objectInfo),
- mode: mode.Disabled,
- counter: fstree.NewSimpleCounter(),
-
- options: options{
- log: logger.NewLoggerWrapper(zap.NewNop()),
- maxObjectSize: defaultMaxObjectSize,
- workersCount: defaultFlushWorkersCount,
- maxCacheSize: defaultMaxCacheSize,
- metrics: DefaultMetrics(),
- flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
- qosLimiter: qos.NewNoopLimiter(),
- },
- }
-
- for i := range opts {
- opts[i](&c.options)
- }
-
- return c
-}
-
-// SetLogger sets logger. It is used after the shard ID was generated to use it in logs.
-func (c *cache) SetLogger(l *logger.Logger) {
- c.log = l
-}
-
-func (c *cache) DumpInfo() Info {
- return Info{
- Path: c.path,
- }
-}
-
-// Open opens and initializes database. Reads object counters from the ObjectCounters instance.
-func (c *cache) Open(_ context.Context, mod mode.Mode) error {
- c.modeMtx.Lock()
- defer c.modeMtx.Unlock()
- c.mode = mod
- if mod.NoMetabase() {
- return nil
- }
- err := c.openStore(mode.ConvertToComponentModeDegraded(mod))
- if err != nil {
- return metaerr.Wrap(err)
- }
- c.initCounters()
- return nil
-}
-
-// Init runs necessary services.
-func (c *cache) Init(ctx context.Context) error {
- c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode))
- if err := c.flushAndDropBBoltDB(ctx); err != nil {
- return fmt.Errorf("flush previous version write-cache database: %w", err)
- }
- ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) // canceling performed by cache
- c.cancel.Store(cancel)
- c.runFlushLoop(ctx)
- return nil
-}
-
-// Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op.
-func (c *cache) Close(ctx context.Context) error {
- if cancelValue := c.cancel.Swap(dummyCanceler); cancelValue != nil {
- cancelValue.(context.CancelFunc)()
- }
- // We cannot lock mutex for the whole operation duration
- // because it is taken by some background workers, so `wg.Wait()` is done without modeMtx.
- c.modeMtx.Lock()
- c.mode = mode.DegradedReadOnly // prevent new operations from being processed
- c.modeMtx.Unlock()
-
- c.wg.Wait()
-
- c.modeMtx.Lock()
- defer c.modeMtx.Unlock()
-
- var err error
- if c.fsTree != nil {
- err = c.fsTree.Close(ctx)
- if err != nil {
- c.fsTree = nil
- }
- }
- c.metrics.Close()
- return nil
-}
-
-func (c *cache) GetMetrics() Metrics {
- return c.metrics
-}
diff --git a/pkg/local_object_storage/writecache/config/config.go b/pkg/local_object_storage/writecache/config/config.go
new file mode 100644
index 000000000..91f097e17
--- /dev/null
+++ b/pkg/local_object_storage/writecache/config/config.go
@@ -0,0 +1,22 @@
+// Package config provides the common configuration options for write cache implementations.
+package config
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
+)
+
+// Type is the write cache implementation type.
+type Type int
+
+const (
+ TypeBBolt Type = iota
+ TypeBadger
+)
+
+// Options are the configuration options for the write cache.
+type Options struct {
+ Type Type
+ BBoltOptions []writecachebbolt.Option
+ BadgerOptions []writecachebadger.Option
+}
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
deleted file mode 100644
index 893d27ba2..000000000
--- a/pkg/local_object_storage/writecache/flush.go
+++ /dev/null
@@ -1,254 +0,0 @@
-package writecache
-
-import (
- "context"
- "errors"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
-)
-
-const (
- // defaultFlushWorkersCount is number of workers for putting objects in main storage.
- defaultFlushWorkersCount = 20
- // defaultFlushInterval is default time interval between successive flushes.
- defaultFlushInterval = 10 * time.Second
-)
-
-var errIterationCompleted = errors.New("iteration completed")
-
-// runFlushLoop starts background workers which periodically flush objects to the blobstor.
-func (c *cache) runFlushLoop(ctx context.Context) {
- if c.disableBackgroundFlush {
- return
- }
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String())
- fl := newFlushLimiter(c.flushSizeLimit)
- c.wg.Add(1)
- go func() {
- defer c.wg.Done()
- c.pushToFlushQueue(ctx, fl)
- }()
-
- for range c.workersCount {
- c.wg.Add(1)
- go c.workerFlush(ctx, fl)
- }
-}
-
-func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
- stopf := context.AfterFunc(ctx, func() {
- fl.close()
- })
- defer stopf()
-
- tick := time.NewTicker(defaultFlushInterval)
- for {
- select {
- case <-tick.C:
- c.modeMtx.RLock()
- if c.readOnly() || c.noMetabase() {
- c.modeMtx.RUnlock()
- continue
- }
-
- release, err := c.qosLimiter.ReadRequest(ctx)
- if err != nil {
- c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err))
- c.modeMtx.RUnlock()
- continue
- }
- err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
- if err := fl.acquire(oi.DataSize); err != nil {
- return err
- }
- select {
- case c.flushCh <- objectInfo{
- addr: oi.Address,
- size: oi.DataSize,
- }:
- return nil
- case <-ctx.Done():
- fl.release(oi.DataSize)
- return ctx.Err()
- }
- })
- release()
- if err != nil {
- c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
- }
-
- c.modeMtx.RUnlock()
-
- // counter changed by fstree
- c.estimateCacheSize()
- case <-ctx.Done():
- return
- }
- }
-}
-
-func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) {
- defer c.wg.Done()
-
- var objInfo objectInfo
- for {
- select {
- case objInfo = <-c.flushCh:
- c.flushIfAnObjectExistsWorker(ctx, objInfo, fl)
- case <-ctx.Done():
- return
- }
- }
-}
-
-func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) {
- defer fl.release(objInfo.size)
-
- release, err := c.qosLimiter.WriteRequest(ctx)
- if err != nil {
- c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err))
- return
- }
- defer release()
- res, err := c.fsTree.Get(ctx, common.GetPrm{
- Address: objInfo.addr,
- })
- if err != nil {
- if !client.IsErrObjectNotFound(err) {
- c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
- }
- return
- }
-
- err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree)
- if err != nil {
- // Error is handled in flushObject.
- return
- }
-
- c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData)))
-}
-
-func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) {
- if c.reportError != nil {
- c.reportError(ctx, msg, err)
- } else {
- c.log.Error(ctx, msg,
- zap.String("address", addr),
- zap.Error(err))
- }
-}
-
-func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
- var prm common.IteratePrm
- prm.IgnoreErrors = ignoreErrors
- prm.Handler = func(e common.IterationElement) error {
- sAddr := e.Address.EncodeToString()
-
- var obj objectSDK.Object
- err := obj.Unmarshal(e.ObjectData)
- if err != nil {
- c.reportFlushError(ctx, logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
- if ignoreErrors {
- return nil
- }
- return err
- }
-
- err = c.flushObject(ctx, &obj, e.ObjectData, StorageTypeFSTree)
- if err != nil {
- return err
- }
-
- c.deleteFromDisk(ctx, e.Address, uint64(len(e.ObjectData)))
- return nil
- }
-
- _, err := c.fsTree.Iterate(ctx, prm)
- return err
-}
-
-// flushObject is used to write object directly to the main storage.
-func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []byte, st StorageType) error {
- var err error
-
- defer func() {
- c.metrics.Flush(err == nil, st)
- }()
-
- addr := objectCore.AddressOf(obj)
-
- var prm common.PutPrm
- prm.Object = obj
- prm.RawData = data
-
- res, err := c.blobstor.Put(ctx, prm)
- if err != nil {
- if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
- !errors.Is(err, blobstor.ErrNoPlaceFound) {
- c.reportFlushError(ctx, logs.FSTreeCantFushObjectBlobstor,
- addr.EncodeToString(), err)
- }
- return err
- }
-
- var updPrm meta.UpdateStorageIDPrm
- updPrm.SetAddress(addr)
- updPrm.SetStorageID(res.StorageID)
-
- _, err = c.metabase.UpdateStorageID(ctx, updPrm)
- if err != nil {
- c.reportFlushError(ctx, logs.FSTreeCantUpdateID,
- addr.EncodeToString(), err)
- }
- return err
-}
-
-// Flush flushes all objects from the write-cache to the main storage.
-func (c *cache) Flush(ctx context.Context, ignoreErrors, seal bool) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Flush",
- trace.WithAttributes(
- attribute.Bool("ignore_errors", ignoreErrors),
- attribute.Bool("seal", seal),
- ))
- defer span.End()
-
- c.modeMtx.Lock() // exclusive lock to not to conflict with background flush
- defer c.modeMtx.Unlock()
- if c.noMetabase() {
- return ErrDegraded
- }
-
- if err := c.flush(ctx, ignoreErrors); err != nil {
- return err
- }
-
- if seal {
- m := c.mode | mode.ReadOnly
- if err := c.setMode(ctx, m, setModePrm{ignoreErrors: ignoreErrors}); err != nil {
- return err
- }
- c.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
- }
- return nil
-}
-
-func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
- return c.flushFSTree(ctx, ignoreErrors)
-}
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
deleted file mode 100644
index 7fc84657c..000000000
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package writecache
-
-import (
- "context"
- "os"
- "path/filepath"
- "sync/atomic"
- "testing"
-
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap"
-)
-
-func TestFlush(t *testing.T) {
- testlogger := test.NewLogger(t)
-
- createCacheFn := func(t *testing.T, mb *meta.DB, bs MainStorage, opts ...Option) Cache {
- return New(
- append([]Option{
- WithLogger(testlogger),
- WithPath(filepath.Join(t.TempDir(), "writecache")),
- WithMetabase(mb),
- WithBlobstor(bs),
- WithDisableBackgroundFlush(),
- }, opts...)...)
- }
-
- errCountOpt := func() (Option, *atomic.Uint32) {
- cnt := &atomic.Uint32{}
- return WithReportErrorFunc(func(ctx context.Context, msg string, err error) {
- cnt.Add(1)
- testlogger.Warn(ctx, msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
- }), cnt
- }
-
- failures := []TestFailureInjector[Option]{
- {
- Desc: "fs, read error",
- InjectFn: func(t *testing.T, wc Cache) {
- c := wc.(*cache)
- obj := testutil.GenerateObject()
- data, err := obj.Marshal()
- require.NoError(t, err)
-
- var prm common.PutPrm
- prm.Address = objectCore.AddressOf(obj)
- prm.RawData = data
-
- _, err = c.fsTree.Put(context.Background(), prm)
- require.NoError(t, err)
-
- p := prm.Address.Object().EncodeToString() + "." + prm.Address.Container().EncodeToString()
- p = filepath.Join(c.fsTree.RootPath, p[:1], p[1:])
-
- _, err = os.Stat(p) // sanity check
- require.NoError(t, err)
- require.NoError(t, os.Truncate(p, 0)) // corrupt the file contents, so that it can't be unmarshalled
- },
- },
- {
- Desc: "fs, invalid object",
- InjectFn: func(t *testing.T, wc Cache) {
- c := wc.(*cache)
- var prm common.PutPrm
- prm.Address = oidtest.Address()
- prm.RawData = []byte{1, 2, 3}
- _, err := c.fsTree.Put(context.Background(), prm)
- require.NoError(t, err)
- },
- },
- }
-
- runFlushTest(t, createCacheFn, errCountOpt, failures...)
-}
-
-const (
- objCount = 4
- smallSize = 256
-)
-
-type CreateCacheFunc[Option any] func(
- t *testing.T,
- meta *meta.DB,
- bs MainStorage,
- opts ...Option,
-) Cache
-
-type TestFailureInjector[Option any] struct {
- Desc string
- InjectFn func(*testing.T, Cache)
-}
-
-type objectPair struct {
- addr oid.Address
- obj *objectSDK.Object
-}
-
-func runFlushTest[Option any](
- t *testing.T,
- createCacheFn CreateCacheFunc[Option],
- errCountOption func() (Option, *atomic.Uint32),
- failures ...TestFailureInjector[Option],
-) {
- t.Run("no errors", func(t *testing.T) {
- wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close(context.Background())) }()
- objects := putObjects(t, wc)
-
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
-
- require.NoError(t, wc.Flush(context.Background(), false, false))
-
- check(t, mb, bs, objects)
- })
-
- t.Run("flush on moving to degraded mode", func(t *testing.T) {
- wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close(context.Background())) }()
- objects := putObjects(t, wc)
-
- // Blobstor is read-only, so we expect en error from `flush` here.
- require.Error(t, wc.SetMode(context.Background(), mode.Degraded))
-
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, wc.SetMode(context.Background(), mode.Degraded))
-
- check(t, mb, bs, objects)
- })
-
- t.Run("ignore errors", func(t *testing.T) {
- for _, f := range failures {
- t.Run(f.Desc, func(t *testing.T) {
- errCountOpt, errCount := errCountOption()
- wc, bs, mb := newCache(t, createCacheFn, errCountOpt)
- defer func() { require.NoError(t, wc.Close(context.Background())) }()
- objects := putObjects(t, wc)
- f.InjectFn(t, wc)
-
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite))
-
- require.Equal(t, uint32(0), errCount.Load())
- require.Error(t, wc.Flush(context.Background(), false, false))
- require.Greater(t, errCount.Load(), uint32(0))
- require.NoError(t, wc.Flush(context.Background(), true, false))
-
- check(t, mb, bs, objects)
- })
- }
- })
-}
-
-func newCache[Option any](
- t *testing.T,
- createCacheFn CreateCacheFunc[Option],
- opts ...Option,
-) (Cache, *blobstor.BlobStor, *meta.DB) {
- dir := t.TempDir()
- mb := meta.New(
- meta.WithPath(filepath.Join(dir, "meta")),
- meta.WithEpochState(dummyEpoch{}))
- require.NoError(t, mb.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.Init(context.Background()))
-
- bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{
- {
- Storage: fstree.New(
- fstree.WithPath(filepath.Join(dir, "blob")),
- fstree.WithDepth(0),
- fstree.WithDirNameLen(1)),
- },
- }))
- require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init(context.Background()))
-
- wc := createCacheFn(t, mb, bs, opts...)
- require.NoError(t, wc.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, wc.Init(context.Background()))
-
- // First set mode for metabase and blobstor to prevent background flushes.
- require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly))
- require.NoError(t, bs.SetMode(context.Background(), mode.ReadOnly))
-
- return wc, bs, mb
-}
-
-func putObject(t *testing.T, c Cache, size int) objectPair {
- obj := testutil.GenerateObjectWithSize(size)
- data, err := obj.Marshal()
- require.NoError(t, err)
-
- var prm common.PutPrm
- prm.Address = objectCore.AddressOf(obj)
- prm.Object = obj
- prm.RawData = data
-
- _, err = c.Put(context.Background(), prm)
- require.NoError(t, err)
-
- return objectPair{prm.Address, prm.Object}
-}
-
-func putObjects(t *testing.T, c Cache) []objectPair {
- objects := make([]objectPair, objCount)
- for i := range objects {
- objects[i] = putObject(t, c, 1+(i%2)*smallSize)
- }
- return objects
-}
-
-func check(t *testing.T, mb *meta.DB, bs *blobstor.BlobStor, objects []objectPair) {
- for i := range objects {
- var mPrm meta.StorageIDPrm
- mPrm.SetAddress(objects[i].addr)
-
- mRes, err := mb.StorageID(context.Background(), mPrm)
- require.NoError(t, err)
-
- var prm common.GetPrm
- prm.Address = objects[i].addr
- prm.StorageID = mRes.StorageID()
-
- res, err := bs.Get(context.Background(), prm)
- require.NoError(t, err, objects[i].addr)
- require.Equal(t, objects[i].obj, res.Object)
- }
-}
-
-type dummyEpoch struct{}
-
-func (dummyEpoch) CurrentEpoch() uint64 {
- return 0
-}
diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go
deleted file mode 100644
index 0e020b36e..000000000
--- a/pkg/local_object_storage/writecache/limiter.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package writecache
-
-import (
- "errors"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
-)
-
-var errLimiterClosed = errors.New("acquire failed: limiter closed")
-
-// flushLimiter is used to limit the total size of objects
-// being flushed to blobstore at the same time. This is a necessary
-// limitation so that the flushing process does not have
-// a strong impact on user requests.
-type flushLimiter struct {
- count, size uint64
- maxSize uint64
- cond *sync.Cond
- closed bool
-}
-
-func newFlushLimiter(maxSize uint64) *flushLimiter {
- return &flushLimiter{
- maxSize: maxSize,
- cond: sync.NewCond(&sync.Mutex{}),
- }
-}
-
-func (l *flushLimiter) acquire(size uint64) error {
- l.cond.L.Lock()
- defer l.cond.L.Unlock()
-
- // it is allowed to overflow maxSize to allow flushing objects with size > maxSize
- for l.count > 0 && l.size+size > l.maxSize && !l.closed {
- l.cond.Wait()
- if l.closed {
- return errLimiterClosed
- }
- }
- l.count++
- l.size += size
- return nil
-}
-
-func (l *flushLimiter) release(size uint64) {
- l.cond.L.Lock()
- defer l.cond.L.Unlock()
-
- assert.True(l.size >= size, "flushLimiter: invalid size")
- l.size -= size
-
- assert.True(l.count > 0, "flushLimiter: invalid count")
- l.count--
-
- l.cond.Broadcast()
-}
-
-func (l *flushLimiter) close() {
- l.cond.L.Lock()
- defer l.cond.L.Unlock()
-
- l.closed = true
-
- l.cond.Broadcast()
-}
diff --git a/pkg/local_object_storage/writecache/limiter_test.go b/pkg/local_object_storage/writecache/limiter_test.go
deleted file mode 100644
index 1ca3e1156..000000000
--- a/pkg/local_object_storage/writecache/limiter_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package writecache
-
-import (
- "sync/atomic"
- "testing"
-
- "github.com/stretchr/testify/require"
- "golang.org/x/sync/errgroup"
-)
-
-func TestLimiter(t *testing.T) {
- var maxSize uint64 = 10
- var single uint64 = 3
- l := newFlushLimiter(uint64(maxSize))
- var currSize atomic.Int64
- var eg errgroup.Group
- for range 10_000 {
- eg.Go(func() error {
- defer l.release(single)
- defer currSize.Add(-1)
- l.acquire(single)
- require.True(t, currSize.Add(1) <= 3)
- return nil
- })
- }
- require.NoError(t, eg.Wait())
-}
diff --git a/pkg/local_object_storage/writecache/metrics.go b/pkg/local_object_storage/writecache/metrics.go
index e3641f85e..5eac06698 100644
--- a/pkg/local_object_storage/writecache/metrics.go
+++ b/pkg/local_object_storage/writecache/metrics.go
@@ -19,17 +19,15 @@ const (
)
type Metrics interface {
- SetShardID(string)
Get(d time.Duration, success bool, st StorageType)
Delete(d time.Duration, success bool, st StorageType)
Put(d time.Duration, success bool, st StorageType)
Flush(success bool, st StorageType)
Evict(st StorageType)
- SetEstimateSize(uint64)
- SetMode(m mode.ComponentMode)
- SetActualCounters(uint64)
- SetPath(path string)
+ SetEstimateSize(db, fstree uint64)
+ SetMode(m mode.Mode)
+ SetActualCounters(db, fstree uint64)
Close()
}
@@ -37,21 +35,17 @@ func DefaultMetrics() Metrics { return metricsStub{} }
type metricsStub struct{}
-func (metricsStub) SetShardID(string) {}
-
-func (metricsStub) SetPath(string) {}
-
func (metricsStub) Get(time.Duration, bool, StorageType) {}
func (metricsStub) Delete(time.Duration, bool, StorageType) {}
func (metricsStub) Put(time.Duration, bool, StorageType) {}
-func (metricsStub) SetEstimateSize(uint64) {}
+func (metricsStub) SetEstimateSize(uint64, uint64) {}
-func (metricsStub) SetMode(mode.ComponentMode) {}
+func (metricsStub) SetMode(mode.Mode) {}
-func (metricsStub) SetActualCounters(uint64) {}
+func (metricsStub) SetActualCounters(uint64, uint64) {}
func (metricsStub) Flush(bool, StorageType) {}
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
deleted file mode 100644
index c491be60b..000000000
--- a/pkg/local_object_storage/writecache/mode.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package writecache
-
-import (
- "context"
- "errors"
- "fmt"
- "os"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-type setModePrm struct {
- ignoreErrors bool
- shrink bool
-}
-
-// SetMode sets write-cache mode of operation.
-// When shard is put in read-only mode all objects in memory are flushed to disk
-// and all background jobs are suspended.
-func (c *cache) SetMode(ctx context.Context, m mode.Mode) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "writecache.SetMode",
- trace.WithAttributes(
- attribute.String("mode", m.String()),
- ))
- defer span.End()
-
- c.modeMtx.Lock()
- defer c.modeMtx.Unlock()
-
- err := c.setMode(ctx, m, setModePrm{ignoreErrors: true})
- if err == nil {
- c.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
- }
- return err
-}
-
-// setMode applies new mode. Must be called with cache.modeMtx lock taken.
-func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error {
- var err error
- turnOffMeta := m.NoMetabase()
-
- if turnOffMeta && !c.mode.NoMetabase() {
- err = c.flush(ctx, prm.ignoreErrors)
- if err != nil {
- return err
- }
- }
-
- if err := c.closeStorage(ctx, prm.shrink); err != nil {
- return err
- }
-
- // Suspend producers to ensure there are channel send operations in fly.
- // flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
- // guarantees that there are no in-fly operations.
- for len(c.flushCh) != 0 {
- c.log.Info(ctx, logs.WritecacheWaitingForChannelsToFlush)
- time.Sleep(time.Second)
- }
-
- if turnOffMeta {
- c.mode = m
- return nil
- }
-
- if err = c.openStore(mode.ConvertToComponentModeDegraded(m)); err != nil {
- return err
- }
-
- c.mode = m
- return nil
-}
-
-func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
- if c.fsTree == nil {
- return nil
- }
- if !shrink {
- if err := c.fsTree.Close(ctx); err != nil {
- return fmt.Errorf("close write-cache storage: %w", err)
- }
- return nil
- }
-
- empty := true
- _, err := c.fsTree.Iterate(ctx, common.IteratePrm{
- Handler: func(common.IterationElement) error {
- return errIterationCompleted
- },
- })
- if err != nil {
- if errors.Is(err, errIterationCompleted) {
- empty = false
- } else {
- return fmt.Errorf("check write-cache items: %w", err)
- }
- }
- if err := c.fsTree.Close(ctx); err != nil {
- return fmt.Errorf("close write-cache storage: %w", err)
- }
- if empty {
- err := os.RemoveAll(c.path)
- if err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("remove write-cache files: %w", err)
- }
- } else {
- c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty)
- }
- return nil
-}
-
-// readOnly returns true if current mode is read-only.
-// `c.modeMtx` must be taken.
-func (c *cache) readOnly() bool {
- return c.mode.ReadOnly()
-}
-
-// noMetabase returns true if c is operating without the metabase.
-// `c.modeMtx` must be taken.
-func (c *cache) noMetabase() bool {
- return c.mode.NoMetabase()
-}
diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go
deleted file mode 100644
index 4fbadbc64..000000000
--- a/pkg/local_object_storage/writecache/mode_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package writecache
-
-import (
- "context"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- "github.com/stretchr/testify/require"
-)
-
-func TestMode(t *testing.T) {
- t.Parallel()
- wc := New(
- WithLogger(test.NewLogger(t)),
- WithFlushWorkersCount(2),
- WithPath(t.TempDir()))
-
- require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly))
- require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init(context.Background()))
- require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close(context.Background()))
-
- require.NoError(t, wc.Open(context.Background(), mode.Degraded))
- require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init(context.Background()))
- require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close(context.Background()))
-}
diff --git a/pkg/local_object_storage/writecache/seal.go b/pkg/local_object_storage/writecache/seal.go
deleted file mode 100644
index fa224f5e0..000000000
--- a/pkg/local_object_storage/writecache/seal.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package writecache
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-func (c *cache) Seal(ctx context.Context, prm SealPrm) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Seal",
- trace.WithAttributes(
- attribute.Bool("ignore_errors", prm.IgnoreErrors),
- attribute.Bool("restore_mode", prm.RestoreMode),
- ))
- defer span.End()
-
- c.modeMtx.Lock()
- defer c.modeMtx.Unlock()
-
- sourceMode := c.mode
- // flush will be done by setMode
- err := c.setMode(ctx, mode.DegradedReadOnly, setModePrm{ignoreErrors: prm.IgnoreErrors, shrink: prm.Shrink})
- if err != nil {
- return err
- }
- c.metrics.SetMode(mode.ComponentDisabled)
- if prm.RestoreMode {
- err = c.setMode(ctx, sourceMode, setModePrm{ignoreErrors: prm.IgnoreErrors})
- if err == nil {
- c.metrics.SetMode(mode.ConvertToComponentMode(sourceMode))
- }
- }
- return err
-}
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
deleted file mode 100644
index 7a52d3672..000000000
--- a/pkg/local_object_storage/writecache/state.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package writecache
-
-func (c *cache) estimateCacheSize() (uint64, uint64) {
- count, size := c.counter.CountSize()
- c.metrics.SetEstimateSize(size)
- c.metrics.SetActualCounters(count)
- return count, size
-}
-
-func (c *cache) hasEnoughSpace(objectSize uint64) bool {
- count, size := c.estimateCacheSize()
- if c.maxCacheCount > 0 && count+1 > c.maxCacheCount {
- return false
- }
- return c.maxCacheSize >= size+objectSize
-}
-
-func (c *cache) initCounters() {
- c.estimateCacheSize()
-}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
deleted file mode 100644
index e88566cdf..000000000
--- a/pkg/local_object_storage/writecache/storage.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package writecache
-
-import (
- "context"
- "fmt"
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-func (c *cache) openStore(mod mode.ComponentMode) error {
- err := util.MkdirAllX(c.path, os.ModePerm)
- if err != nil {
- return err
- }
-
- c.fsTree = fstree.New(
- fstree.WithPath(c.path),
- fstree.WithPerm(os.ModePerm),
- fstree.WithDepth(1),
- fstree.WithDirNameLen(1),
- fstree.WithNoSync(c.noSync),
- fstree.WithFileCounter(c.counter),
- )
- if err := c.fsTree.Open(mod); err != nil {
- return fmt.Errorf("open FSTree: %w", err)
- }
- if err := c.fsTree.Init(); err != nil {
- return fmt.Errorf("init FSTree: %w", err)
- }
-
- return nil
-}
-
-func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) {
- _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size})
- if err != nil && !client.IsErrObjectNotFound(err) {
- c.log.Error(ctx, logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
- } else if err == nil {
- storagelog.Write(ctx, c.log,
- storagelog.AddressField(addr.EncodeToString()),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("fstree DELETE"),
- )
- c.metrics.Evict(StorageTypeFSTree)
- // counter changed by fstree
- c.estimateCacheSize()
- }
-}
diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go
deleted file mode 100644
index 5eb341ba4..000000000
--- a/pkg/local_object_storage/writecache/upgrade.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package writecache
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io/fs"
- "os"
- "path/filepath"
- "time"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
-)
-
-const dbName = "small.bolt"
-
-var defaultBucket = []byte{0}
-
-func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
- _, err := os.Stat(filepath.Join(c.path, dbName))
- if err != nil && os.IsNotExist(err) {
- return nil
- }
- if err != nil {
- return fmt.Errorf("check write-cache database existence: %w", err)
- }
- db, err := OpenDB(c.path, true, os.OpenFile)
- if err != nil {
- return fmt.Errorf("open write-cache database: %w", err)
- }
- defer func() {
- _ = db.Close()
- }()
-
- var last string
- for {
- batch, err := c.readNextDBBatch(db, last)
- if err != nil {
- return err
- }
- if len(batch) == 0 {
- break
- }
- for _, item := range batch {
- var obj objectSDK.Object
- if err := obj.Unmarshal(item.data); err != nil {
- return fmt.Errorf("unmarshal object from database: %w", err)
- }
- if err := c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil {
- return fmt.Errorf("flush object from database: %w", err)
- }
- }
- last = batch[len(batch)-1].address
- }
- if err := db.Close(); err != nil {
- return fmt.Errorf("close write-cache database: %w", err)
- }
- if err := os.Remove(filepath.Join(c.path, dbName)); err != nil {
- return fmt.Errorf("remove write-cache database: %w", err)
- }
- return nil
-}
-
-type batchItem struct {
- data []byte
- address string
-}
-
-func (c *cache) readNextDBBatch(db *bbolt.DB, last string) ([]batchItem, error) {
- const batchSize = 100
- var batch []batchItem
- err := db.View(func(tx *bbolt.Tx) error {
- var addr oid.Address
-
- b := tx.Bucket(defaultBucket)
- cs := b.Cursor()
- for k, data := cs.Seek([]byte(last)); k != nil; k, data = cs.Next() {
- sa := string(k)
- if sa == last {
- continue
- }
- if err := addr.DecodeString(sa); err != nil {
- return fmt.Errorf("decode address from database: %w", err)
- }
-
- batch = append(batch, batchItem{data: bytes.Clone(data), address: sa})
- if len(batch) == batchSize {
- return errIterationCompleted
- }
- }
- return nil
- })
- if err == nil || errors.Is(err, errIterationCompleted) {
- return batch, nil
- }
- return nil, err
-}
-
-// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
-func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
- return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
- NoFreelistSync: true,
- ReadOnly: ro,
- Timeout: 100 * time.Millisecond,
- OpenFile: openFile,
- })
-}
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index 7ed511318..c2fdc100f 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -20,12 +20,6 @@ type Info struct {
Path string
}
-type SealPrm struct {
- IgnoreErrors bool
- RestoreMode bool
- Shrink bool
-}
-
// Cache represents write-cache for objects.
type Cache interface {
Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error)
@@ -38,35 +32,31 @@ type Cache interface {
// Returns ErrReadOnly if the Cache is currently in the read-only mode.
Delete(context.Context, oid.Address) error
Put(context.Context, common.PutPrm) (common.PutRes, error)
- SetMode(context.Context, mode.Mode) error
+ SetMode(mode.Mode) error
SetLogger(*logger.Logger)
DumpInfo() Info
- Flush(context.Context, bool, bool) error
- Seal(context.Context, SealPrm) error
+ Flush(context.Context, bool) error
- Init(context.Context) error
- Open(ctx context.Context, mode mode.Mode) error
- Close(context.Context) error
- GetMetrics() Metrics
+ Init() error
+ Open(ctx context.Context, readOnly bool) error
+ Close() error
}
// MainStorage is the interface of the underlying storage of Cache implementations.
type MainStorage interface {
- Compressor() *compression.Compressor
+ Compressor() *compression.Config
Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error)
Put(context.Context, common.PutPrm) (common.PutRes, error)
}
// Metabase is the interface of the metabase used by Cache implementations.
type Metabase interface {
- UpdateStorageID(context.Context, meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error)
+ UpdateStorageID(meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error)
}
var (
// ErrReadOnly is returned when Put/Write is performed in a read-only mode.
ErrReadOnly = logicerr.New("write-cache is in read-only mode")
- // ErrDegraded is returned when writecache is in degraded mode.
- ErrDegraded = logicerr.New("write-cache is in degraded mode")
// ErrNotInitialized is returned when write-cache is initializing.
ErrNotInitialized = logicerr.New("write-cache is not initialized yet")
// ErrBigObject is returned when object is too big to be placed in cache.
diff --git a/pkg/local_object_storage/writecache/writecachebadger/cachebadger.go b/pkg/local_object_storage/writecache/writecachebadger/cachebadger.go
new file mode 100644
index 000000000..d5da77635
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/cachebadger.go
@@ -0,0 +1,135 @@
+package writecachebadger
+
+import (
+ "context"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+)
+
+type cache struct {
+ options
+
+ mode mode.Mode
+ modeMtx sync.RWMutex
+
+ // flushCh is a channel with objects to flush.
+ flushCh chan objectInfo
+ // scheduled4Flush contains objects scheduled for flush via flushCh
+ // helps to avoid multiple flushing of one object
+ scheduled4Flush map[oid.Address]struct{}
+ scheduled4FlushMtx sync.RWMutex
+ // closeCh is close channel, protected by modeMtx.
+ closeCh chan struct{}
+ // wg is a wait group for flush workers.
+ wg sync.WaitGroup
+ // store contains underlying database.
+ store
+}
+
+// wcStorageType is used for write-cache operations logging.
+const wcStorageType = "write-cache"
+
+type objectInfo struct {
+ addr oid.Address
+ data []byte
+ obj *objectSDK.Object
+}
+
+const (
+ defaultMaxObjectSize = 64 << 20 // 64 MiB
+ defaultSmallObjectSize = 32 << 10 // 32 KiB
+ defaultMaxCacheSize = 1 << 30 // 1 GiB
+)
+
+// New creates new writecache instance.
+func New(opts ...Option) writecache.Cache {
+ c := &cache{
+ flushCh: make(chan objectInfo),
+ mode: mode.ReadWrite,
+ scheduled4Flush: map[oid.Address]struct{}{},
+
+ options: options{
+ log: &logger.Logger{Logger: zap.NewNop()},
+ maxObjectSize: defaultMaxObjectSize,
+ workersCount: defaultFlushWorkersCount,
+ maxCacheSize: defaultMaxCacheSize,
+ metrics: writecache.DefaultMetrics(),
+ },
+ }
+
+ for i := range opts {
+ opts[i](&c.options)
+ }
+
+ return c
+}
+
+// SetLogger sets logger. It is used after the shard ID was generated to use it in logs.
+func (c *cache) SetLogger(l *logger.Logger) {
+ c.log = l
+}
+
+func (c *cache) DumpInfo() writecache.Info {
+ return writecache.Info{
+ Path: c.path,
+ }
+}
+
+// Open opens and initializes database. Reads object counters from the ObjectCounters instance.
+func (c *cache) Open(_ context.Context, readOnly bool) error {
+ err := c.openStore(readOnly)
+ if err != nil {
+ return metaerr.Wrap(err)
+ }
+
+ // Opening after Close is done during maintenance mode,
+ // thus we need to create a channel here.
+ c.closeCh = make(chan struct{})
+
+ return metaerr.Wrap(c.initCounters())
+}
+
+// Init runs necessary services.
+func (c *cache) Init() error {
+ c.log.Info(logs.WritecacheBadgerInitExperimental)
+ c.metrics.SetMode(c.mode)
+ c.runFlushLoop()
+ c.runGCLoop()
+ return nil
+}
+
+// Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op.
+func (c *cache) Close() error {
+ // We cannot lock mutex for the whole operation duration
+ // because it is taken by some background workers, so `wg.Wait()` is done without modeMtx.
+ c.modeMtx.Lock()
+ if c.closeCh != nil {
+ close(c.closeCh)
+ }
+ c.mode = mode.DegradedReadOnly // prevent new operations from being processed
+ c.modeMtx.Unlock()
+
+ c.wg.Wait()
+
+ c.modeMtx.Lock()
+ defer c.modeMtx.Unlock()
+
+ c.closeCh = nil
+ var err error
+ if c.db != nil {
+ err = c.db.Close()
+ if err != nil {
+ c.db = nil
+ }
+ }
+ c.metrics.Close()
+ return nil
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/delete.go b/pkg/local_object_storage/writecache/writecachebadger/delete.go
new file mode 100644
index 000000000..afab78547
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/delete.go
@@ -0,0 +1,70 @@
+package writecachebadger
+
+import (
+ "context"
+ "time"
+
+ storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/dgraph-io/badger/v4"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Delete removes object from write-cache.
+//
+// Returns an error of type apistatus.ObjectNotFound if object is missing in write-cache.
+func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
+ _, span := tracing.StartSpanFromContext(ctx, "writecache.Delete",
+ trace.WithAttributes(
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
+ deleted := false
+ storageType := writecache.StorageTypeUndefined
+ startedAt := time.Now()
+ defer func() {
+ c.metrics.Delete(time.Since(startedAt), deleted, storageType)
+ }()
+
+ c.modeMtx.RLock()
+ defer c.modeMtx.RUnlock()
+ if c.readOnly() {
+ return writecache.ErrReadOnly
+ }
+
+ key := addr2key(addr)
+
+ err := c.db.Update(func(tx *badger.Txn) error {
+ it, err := tx.Get(key[:])
+ if err != nil {
+ if err == badger.ErrKeyNotFound {
+ return logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+ return err
+ }
+ if it.ValueSize() > 0 {
+ storageType = writecache.StorageTypeDB
+ err := tx.Delete(key[:])
+ if err == nil {
+ storagelog.Write(c.log,
+ storagelog.AddressField(addr.EncodeToString()),
+ storagelog.StorageTypeField(wcStorageType),
+ storagelog.OpField("db DELETE"),
+ )
+ deleted = true
+ c.objCounters.DecDB()
+ }
+ return err
+ }
+ return nil
+ })
+
+ return metaerr.Wrap(err)
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/flush.go b/pkg/local_object_storage/writecache/writecachebadger/flush.go
new file mode 100644
index 000000000..3f2bdb043
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/flush.go
@@ -0,0 +1,285 @@
+package writecachebadger
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/dgraph-io/badger/v4"
+ "github.com/dgraph-io/ristretto/z"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+)
+
+const (
+ // flushBatchSize is amount of keys which will be read from cache to be flushed
+ // to the main storage. It is used to reduce contention between cache put
+ // and cache persist.
+ flushBatchSize = 512
+ // defaultFlushWorkersCount is number of workers for putting objects in main storage.
+ defaultFlushWorkersCount = 20
+ // defaultFlushInterval is default time interval between successive flushes.
+ defaultFlushInterval = time.Second
+)
+
+type collector struct {
+ cache *cache
+ scheduled int
+ processed int
+ cancel func()
+}
+
+func (c *collector) Send(buf *z.Buffer) error {
+ list, err := badger.BufferToKVList(buf)
+ if err != nil {
+ return err
+ }
+ for _, kv := range list.Kv {
+ select {
+ case <-c.cache.closeCh:
+ c.cancel()
+ return nil
+ default:
+ }
+ if kv.StreamDone {
+ return nil
+ }
+ if c.scheduled >= flushBatchSize {
+ c.cancel()
+ return nil
+ }
+ if got, want := len(kv.Key), len(internalKey{}); got != want {
+ c.cache.log.Debug(
+ fmt.Sprintf("not expected db key len: got %d, want %d", got, want))
+ continue
+ }
+ c.processed++
+ obj := objectSDK.New()
+ val := bytes.Clone(kv.Value)
+ if err = obj.Unmarshal(val); err != nil {
+ continue
+ }
+ addr := objectCore.AddressOf(obj)
+ c.cache.scheduled4FlushMtx.RLock()
+ _, ok := c.cache.scheduled4Flush[addr]
+ c.cache.scheduled4FlushMtx.RUnlock()
+ if ok {
+ c.cache.log.Debug(logs.WritecacheBadgerObjAlreadyScheduled, zap.Stringer("obj", addr))
+ continue
+ }
+ c.cache.scheduled4FlushMtx.Lock()
+ c.cache.scheduled4Flush[addr] = struct{}{}
+ c.cache.scheduled4FlushMtx.Unlock()
+ c.scheduled++
+ select {
+ case c.cache.flushCh <- objectInfo{
+ addr: addr,
+ data: val,
+ obj: obj,
+ }:
+ case <-c.cache.closeCh:
+ c.cancel()
+ return nil
+ }
+ }
+ return nil
+}
+
+// runFlushLoop starts background workers which periodically flush objects to the blobstor.
+func (c *cache) runFlushLoop() {
+ for i := 0; i < c.workersCount; i++ {
+ c.wg.Add(1)
+ go c.workerFlushSmall()
+ }
+
+ c.wg.Add(1)
+ go func() {
+ defer c.wg.Done()
+
+ tt := time.NewTimer(defaultFlushInterval)
+ defer tt.Stop()
+
+ for {
+ select {
+ case <-tt.C:
+ c.flushSmallObjects()
+ tt.Reset(defaultFlushInterval)
+ case <-c.closeCh:
+ return
+ }
+ }
+ }()
+}
+
+func (c *cache) flushSmallObjects() {
+ for {
+ select {
+ case <-c.closeCh:
+ return
+ default:
+ }
+ c.modeMtx.RLock()
+ if c.readOnly() {
+ c.modeMtx.RUnlock()
+ time.Sleep(time.Second)
+ continue
+ }
+
+ // Using the db after Close will panic and badger won't wait for outstanding txs,
+ // so we need to check manually.
+ if c.db.IsClosed() {
+ c.modeMtx.RUnlock()
+ return
+ }
+ ctx, cancel := context.WithCancel(context.TODO())
+ coll := collector{
+ cache: c,
+ cancel: cancel,
+ }
+ stream := c.db.NewStream()
+ // All calls to Send are done by a single goroutine
+ stream.Send = coll.Send
+ if err := stream.Orchestrate(ctx); err != nil {
+ c.log.Debug(fmt.Sprintf(
+ "error during flushing object from wc: %s", err))
+ }
+ c.modeMtx.RUnlock()
+ if coll.scheduled == 0 {
+ break
+ }
+ c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache,
+ zap.Int("scheduled", coll.scheduled), zap.Int("processed", coll.processed))
+ }
+}
+
+func (c *cache) reportFlushError(msg string, addr string, err error) {
+ if c.reportError != nil {
+ c.reportError(msg, err)
+ } else {
+ c.log.Error(msg,
+ zap.String("address", addr),
+ zap.Error(err))
+ }
+}
+
+// workerFlushSmall writes small objects to the main storage.
+func (c *cache) workerFlushSmall() {
+ defer c.wg.Done()
+
+ var objInfo objectInfo
+ for {
+ // Give priority to direct put.
+ select {
+ case objInfo = <-c.flushCh:
+ case <-c.closeCh:
+ return
+ }
+
+ err := c.flushObject(context.TODO(), objInfo.obj, objInfo.data, writecache.StorageTypeDB)
+ if err == nil {
+ c.deleteFromDB([]internalKey{addr2key(objInfo.addr)})
+ }
+ c.scheduled4FlushMtx.Lock()
+ delete(c.scheduled4Flush, objInfo.addr)
+ c.scheduled4FlushMtx.Unlock()
+ }
+}
+
+// flushObject is used to write object directly to the main storage.
+func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []byte, st writecache.StorageType) error {
+ var err error
+
+ defer func() {
+ c.metrics.Flush(err == nil, st)
+ }()
+
+ addr := objectCore.AddressOf(obj)
+
+ var prm common.PutPrm
+ prm.Object = obj
+ prm.RawData = data
+
+ res, err := c.blobstor.Put(ctx, prm)
+ if err != nil {
+ if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
+ !errors.Is(err, blobstor.ErrNoPlaceFound) {
+ c.reportFlushError(logs.FrostFSNodeCantFlushObjectToBlobstor,
+ addr.EncodeToString(), err)
+ }
+ return err
+ }
+
+ var updPrm meta.UpdateStorageIDPrm
+ updPrm.SetAddress(addr)
+ updPrm.SetStorageID(res.StorageID)
+
+ _, err = c.metabase.UpdateStorageID(updPrm)
+ if err != nil {
+ c.reportFlushError(logs.FrostFSNodeCantUpdateObjectStorageID,
+ addr.EncodeToString(), err)
+ }
+ return err
+}
+
+// Flush flushes all objects from the write-cache to the main storage.
+// Write-cache must be in readonly mode to ensure correctness of an operation and
+// to prevent interference with background flush workers.
+func (c *cache) Flush(ctx context.Context, ignoreErrors bool) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Flush",
+ trace.WithAttributes(
+ attribute.Bool("ignore_errors", ignoreErrors),
+ ))
+ defer span.End()
+
+ c.modeMtx.RLock()
+ defer c.modeMtx.RUnlock()
+
+ return c.flush(ctx, ignoreErrors)
+}
+
+func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
+ return c.db.View(func(tx *badger.Txn) error {
+ it := tx.NewIterator(badger.DefaultIteratorOptions)
+ defer it.Close()
+ var key internalKey
+ for it.Rewind(); it.Valid(); it.Next() {
+ if got, want := int(it.Item().KeySize()), len(key); got != want {
+ err := fmt.Errorf("invalid db key len: got %d, want %d", got, want)
+ c.reportFlushError(logs.FrostFSNodeCantDecodeObjectAddressFromDB, hex.EncodeToString(it.Item().Key()), metaerr.Wrap(err))
+ if ignoreErrors {
+ continue
+ }
+ return err
+ }
+ if err := it.Item().Value(func(data []byte) error {
+ var obj objectSDK.Object
+ if err := obj.Unmarshal(data); err != nil {
+ copy(key[:], it.Item().Key())
+ c.reportFlushError(logs.FrostFSNodeCantUnmarshalObjectFromDB, key.address().EncodeToString(), metaerr.Wrap(err))
+ if ignoreErrors {
+ return nil
+ }
+ return err
+ }
+
+ return c.flushObject(ctx, &obj, data, writecache.StorageTypeDB)
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/flush_test.go b/pkg/local_object_storage/writecache/writecachebadger/flush_test.go
new file mode 100644
index 000000000..392654e48
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/flush_test.go
@@ -0,0 +1,68 @@
+package writecachebadger
+
+import (
+ "path/filepath"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachetest"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/dgraph-io/badger/v4"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+)
+
+func TestFlush(t *testing.T) {
+ testlogger := test.NewLogger(t, true)
+
+ createCacheFn := func(t *testing.T, smallSize uint64, mb *meta.DB, bs writecache.MainStorage, opts ...Option) writecache.Cache {
+ return New(
+ append([]Option{
+ WithLogger(test.NewLogger(t, true)),
+ WithPath(filepath.Join(t.TempDir(), "writecache")),
+ WithMetabase(mb),
+ WithBlobstor(bs),
+ WithGCInterval(1 * time.Second),
+ }, opts...)...)
+ }
+
+ errCountOpt := func() (Option, *atomic.Uint32) {
+ cnt := &atomic.Uint32{}
+ return WithReportErrorFunc(func(msg string, err error) {
+ cnt.Add(1)
+ testlogger.Warn(msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
+ }), cnt
+ }
+
+ failures := []writecachetest.TestFailureInjector[Option]{
+ {
+ Desc: "db, invalid address",
+ InjectFn: func(t *testing.T, wc writecache.Cache) {
+ c := wc.(*cache)
+ obj := testutil.GenerateObject()
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ require.NoError(t, c.db.Update(func(tx *badger.Txn) error {
+ return tx.Set([]byte{1, 2, 3}, data)
+ }))
+ },
+ },
+ {
+ Desc: "db, invalid object",
+ InjectFn: func(t *testing.T, wc writecache.Cache) {
+ c := wc.(*cache)
+ key := addr2key(oidtest.Address())
+ require.NoError(t, c.db.Update(func(tx *badger.Txn) error {
+ return tx.Set(key[:], []byte{1, 2, 3})
+ }))
+ },
+ },
+ }
+
+ writecachetest.TestFlush(t, createCacheFn, errCountOpt, failures...)
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/gc.go b/pkg/local_object_storage/writecache/writecachebadger/gc.go
new file mode 100644
index 000000000..8432a9c04
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/gc.go
@@ -0,0 +1,39 @@
+package writecachebadger
+
+import (
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+)
+
+func (c *cache) runGCLoop() {
+ c.wg.Add(1)
+
+ go func() {
+ defer c.wg.Done()
+
+ t := time.NewTicker(c.gcInterval)
+ defer t.Stop()
+
+ for {
+ select {
+ case <-c.closeCh:
+ return
+ case <-t.C:
+ // This serves to synchronize the c.db field when changing mode as well.
+ c.modeMtx.RLock()
+ ro := c.readOnly()
+ c.modeMtx.RUnlock()
+ if ro {
+ continue
+ }
+
+ // 0.5 is the recommended value so that write amplification of the value log is 2.
+ // See https://pkg.go.dev/github.com/dgraph-io/badger/v4#DB.RunValueLogGC for more info.
+ for c.db.RunValueLogGC(0.5) == nil {
+ c.log.Debug(logs.WritecacheDBValueLogGCRunCompleted)
+ }
+ }
+ }
+ }()
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/generic_test.go b/pkg/local_object_storage/writecache/writecachebadger/generic_test.go
new file mode 100644
index 000000000..08845665f
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/generic_test.go
@@ -0,0 +1,19 @@
+package writecachebadger
+
+import (
+ "testing"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+)
+
+func TestGeneric(t *testing.T) {
+ storagetest.TestAll(t, func(t *testing.T) storagetest.Component {
+ return New(
+ WithLogger(test.NewLogger(t, true)),
+ WithFlushWorkersCount(2),
+ WithPath(t.TempDir()),
+ WithGCInterval(1*time.Second))
+ })
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/get.go b/pkg/local_object_storage/writecache/writecachebadger/get.go
new file mode 100644
index 000000000..42403e551
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/get.go
@@ -0,0 +1,95 @@
+package writecachebadger
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"github.com/dgraph-io/badger/v4"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Get returns object from write-cache.
+//
+// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in write-cache.
+func (c *cache) Get(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+	// The context is used only for the tracing span; the lookup is synchronous.
+	_, span := tracing.StartSpanFromContext(ctx, "writecache.Get",
+		trace.WithAttributes(
+			attribute.String("address", addr.EncodeToString()),
+		))
+	defer span.End()
+
+	obj, err := c.getInternal(addr)
+	return obj, metaerr.Wrap(err)
+}
+
+// getInternal fetches the marshaled object from the badger database,
+// decodes it and records Get metrics on exit.
+//
+// Returns an error of type apistatus.ObjectNotFound if the object is missing;
+// other storage errors are propagated as-is instead of being masked.
+func (c *cache) getInternal(addr oid.Address) (*objectSDK.Object, error) {
+	found := false
+	storageType := writecache.StorageTypeUndefined
+	startedAt := time.Now()
+	defer func() {
+		c.metrics.Get(time.Since(startedAt), found, storageType)
+	}()
+
+	k := addr2key(addr)
+	value, err := Get(c.db, k[:])
+	if err != nil {
+		// Previously every failure was reported as ObjectNotFound, hiding
+		// real I/O errors from callers. Get already wraps a missing key
+		// into apistatus.ObjectNotFound, so pass errors through unchanged.
+		return nil, err
+	}
+
+	obj := objectSDK.New()
+	found = true
+	storageType = writecache.StorageTypeDB
+	return obj, obj.Unmarshal(value)
+}
+
+// Head returns object header from write-cache.
+//
+// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in write-cache.
+func (c *cache) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+	_, span := tracing.StartSpanFromContext(ctx, "writecache.Head",
+		trace.WithAttributes(
+			attribute.String("address", addr.EncodeToString()),
+		))
+	defer span.End()
+
+	// The DB stores whole marshaled objects (no header-only index), so the
+	// full object is fetched and the payload is stripped afterwards.
+	obj, err := c.getInternal(addr)
+	if err != nil {
+		return nil, metaerr.Wrap(err)
+	}
+
+	return obj.CutPayload(), nil
+}
+
+// Get fetches object from the underlying database.
+// Key should be a stringified address.
+//
+// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in db.
+func Get(db *badger.DB, key []byte) ([]byte, error) {
+	var value []byte
+
+	err := db.View(func(tx *badger.Txn) error {
+		it, err := tx.Get(key)
+		if err != nil {
+			// errors.Is (rather than ==) so that a wrapped ErrKeyNotFound
+			// is still converted into the API-level "not found" status.
+			if errors.Is(err, badger.ErrKeyNotFound) {
+				return logicerr.Wrap(new(apistatus.ObjectNotFound))
+			}
+			return err
+		}
+		// ValueCopy is required: the raw value is only valid while the
+		// transaction is open.
+		v, err := it.ValueCopy(nil)
+		if err != nil {
+			return err
+		}
+		value = v
+		return nil
+	})
+
+	return value, metaerr.Wrap(err)
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/iterate.go b/pkg/local_object_storage/writecache/writecachebadger/iterate.go
new file mode 100644
index 000000000..111242048
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/iterate.go
@@ -0,0 +1,32 @@
+package writecachebadger
+
+import (
+	"fmt"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"github.com/dgraph-io/badger/v4"
+)
+
+// IterateDB iterates over all objects stored in badger.DB instance and passes them to f until error return.
+// It is assumed that db is an underlying database of some WriteCache instance.
+//
+// DB must not be nil and should be opened.
+func IterateDB(db *badger.DB, f func(oid.Address) error) error {
+	return metaerr.Wrap(db.View(func(tx *badger.Txn) error {
+		opts := badger.DefaultIteratorOptions
+		// Keys alone are enough to reconstruct addresses, skip value prefetch.
+		opts.PrefetchValues = false
+		it := tx.NewIterator(opts)
+		// Badger requires iterators to be closed before the transaction
+		// finishes; the original code leaked it (cf. initCounters, which
+		// does close its iterator).
+		defer it.Close()
+		for it.Rewind(); it.Valid(); it.Next() {
+			var key internalKey
+			if got, want := len(it.Item().Key()), len(key); got != want {
+				return fmt.Errorf("invalid db key len: got %d, want %d", got, want)
+			}
+			copy(key[:], it.Item().Key())
+			if err := f(key.address()); err != nil {
+				return err
+			}
+		}
+		return nil
+	}))
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/mode.go b/pkg/local_object_storage/writecache/writecachebadger/mode.go
new file mode 100644
index 000000000..03d861831
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/mode.go
@@ -0,0 +1,78 @@
+package writecachebadger
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// SetMode sets write-cache mode of operation.
+// When shard is put in read-only mode all objects in memory are flushed to disk
+// and all background jobs are suspended.
+func (c *cache) SetMode(m mode.Mode) error {
+	ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.SetMode",
+		trace.WithAttributes(
+			attribute.String("mode", m.String()),
+		))
+	defer span.End()
+
+	// Exclusive lock: a mode change must not race with Put/Get/Delete or GC.
+	c.modeMtx.Lock()
+	defer c.modeMtx.Unlock()
+
+	err := c.setMode(ctx, m)
+	if err == nil {
+		// Report the new mode only after it has actually been applied.
+		c.metrics.SetMode(m)
+	}
+	return err
+}
+
+// setMode applies new mode. Must be called with cache.modeMtx lock taken.
+func (c *cache) setMode(ctx context.Context, m mode.Mode) error {
+	var err error
+	turnOffMeta := m.NoMetabase()
+
+	// Moving to a metabase-less mode: flush everything to the main storage
+	// first, because flushing becomes impossible once the DB is closed.
+	if turnOffMeta && !c.mode.NoMetabase() {
+		err = c.flush(ctx, true)
+		if err != nil {
+			return err
+		}
+	}
+
+	// The store is reopened below (or left closed), so release the handle.
+	if c.db != nil {
+		if err = c.db.Close(); err != nil {
+			return fmt.Errorf("can't close write-cache database: %w", err)
+		}
+	}
+
+	// Suspend producers to ensure there are no in-flight channel send operations.
+	// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
+	// guarantees that there are no in-fly operations.
+	for len(c.flushCh) != 0 {
+		c.log.Info(logs.WritecacheWaitingForChannelsToFlush)
+		time.Sleep(time.Second)
+	}
+
+	// In no-metabase mode the store stays closed entirely.
+	if turnOffMeta {
+		c.mode = m
+		return nil
+	}
+
+	if err = c.openStore(m.ReadOnly()); err != nil {
+		return err
+	}
+
+	c.mode = m
+	return nil
+}
+
+// readOnly returns true if current mode is read-only.
+// `c.modeMtx` must be taken.
+func (c *cache) readOnly() bool {
+	return c.mode.ReadOnly()
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/options.go b/pkg/local_object_storage/writecache/writecachebadger/options.go
new file mode 100644
index 000000000..63bfb196c
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/options.go
@@ -0,0 +1,110 @@
+package writecachebadger
+
+import (
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"go.uber.org/zap"
+)
+
+// Option represents write-cache configuration option.
+type Option func(*options)
+
+// options aggregates all configurable parameters of the badger write-cache.
+type options struct {
+	log *logger.Logger
+	// path is a path to a directory for write-cache.
+	path string
+	// blobstor is the main persistent storage.
+	blobstor writecache.MainStorage
+	// metabase is the metabase instance.
+	metabase writecache.Metabase
+	// maxObjectSize is the maximum size of the object stored in the write-cache.
+	maxObjectSize uint64
+	// workersCount is the number of workers flushing objects in parallel.
+	workersCount int
+	// maxCacheSize is the maximum total size of all objects saved in cache (DB + FS).
+	// 1 GiB by default.
+	maxCacheSize uint64
+	// objCounters contains atomic counters for the number of objects stored in cache.
+	objCounters counters
+	// reportError is the function called when encountering disk errors in background workers.
+	reportError func(string, error)
+	// metrics is the metrics implementation.
+	metrics writecache.Metrics
+	// gcInterval is the interval duration to run the GC cycle.
+	gcInterval time.Duration
+}
+
+// WithLogger sets logger, tagging every entry with the component name.
+func WithLogger(log *logger.Logger) Option {
+	return func(o *options) {
+		o.log = &logger.Logger{Logger: log.With(zap.String("component", "WriteCache"))}
+	}
+}
+
+// WithPath sets path to writecache db.
+func WithPath(path string) Option {
+	return func(o *options) {
+		o.path = path
+	}
+}
+
+// WithBlobstor sets main object storage.
+func WithBlobstor(bs writecache.MainStorage) Option {
+	return func(o *options) {
+		o.blobstor = bs
+	}
+}
+
+// WithMetabase sets metabase.
+func WithMetabase(db writecache.Metabase) Option {
+	return func(o *options) {
+		o.metabase = db
+	}
+}
+
+// WithMaxObjectSize sets maximum object size to be stored in write-cache.
+// Non-positive values are ignored and the default is kept.
+func WithMaxObjectSize(sz uint64) Option {
+	return func(o *options) {
+		if sz > 0 {
+			o.maxObjectSize = sz
+		}
+	}
+}
+
+// WithFlushWorkersCount sets the number of workers flushing objects in parallel.
+// Non-positive values are ignored and the default is kept.
+func WithFlushWorkersCount(c int) Option {
+	return func(o *options) {
+		if c > 0 {
+			o.workersCount = c
+		}
+	}
+}
+
+// WithMaxCacheSize sets maximum write-cache size in bytes.
+func WithMaxCacheSize(sz uint64) Option {
+	return func(o *options) {
+		o.maxCacheSize = sz
+	}
+}
+
+// WithReportErrorFunc sets error reporting function.
+func WithReportErrorFunc(f func(string, error)) Option {
+	return func(o *options) {
+		o.reportError = f
+	}
+}
+
+// WithMetrics sets metrics implementation.
+func WithMetrics(metrics writecache.Metrics) Option {
+	return func(o *options) {
+		o.metrics = metrics
+	}
+}
+
+// WithGCInterval sets the duration of the interval to run GC cycles.
+func WithGCInterval(d time.Duration) Option {
+	return func(o *options) {
+		o.gcInterval = d
+	}
+}
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/writecachebadger/put.go
similarity index 50%
rename from pkg/local_object_storage/writecache/put.go
rename to pkg/local_object_storage/writecache/writecachebadger/put.go
index 2fbf50913..c03a0d336 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/writecachebadger/put.go
@@ -1,13 +1,12 @@
-package writecache
+package writecachebadger
import (
"context"
- "fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -20,7 +19,7 @@ import (
// Returns ErrOutOfSpace if saving an object leads to WC's size overflow.
// Returns ErrBigObject if an objects exceeds maximum object size.
func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Put",
+ _, span := tracing.StartSpanFromContext(ctx, "writecache.Put",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.Bool("dont_compress", prm.DontCompress),
@@ -29,61 +28,55 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro
startedAt := time.Now()
added := false
- storageType := StorageTypeUndefined
+ storageType := writecache.StorageTypeUndefined
defer func() {
c.metrics.Put(time.Since(startedAt), added, storageType)
}()
- if !c.modeMtx.TryRLock() {
- return common.PutRes{}, ErrNotInitialized
- }
+ c.modeMtx.RLock()
defer c.modeMtx.RUnlock()
if c.readOnly() {
- return common.PutRes{}, ErrReadOnly
- }
- if c.noMetabase() {
- return common.PutRes{}, ErrDegraded
+ return common.PutRes{}, writecache.ErrReadOnly
}
sz := uint64(len(prm.RawData))
if sz > c.maxObjectSize {
- return common.PutRes{}, ErrBigObject
+ return common.PutRes{}, writecache.ErrBigObject
}
- storageType = StorageTypeFSTree
- err := c.putBig(ctx, prm)
+ oi := objectInfo{
+ addr: prm.Address,
+ obj: prm.Object,
+ data: prm.RawData,
+ }
+
+ storageType = writecache.StorageTypeDB
+ err := c.put(oi)
if err == nil {
added = true
}
- return common.PutRes{}, metaerr.Wrap(err)
+ return common.PutRes{}, err
}
-// putBig writes object to FSTree and pushes it to the flush workers queue.
-func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
- if prm.RawData == nil { // foolproof: RawData should be marshalled by shard.
- data, err := prm.Object.Marshal()
- if err != nil {
- return fmt.Errorf("cannot marshal object: %w", err)
- }
- prm.RawData = data
- }
- size := uint64(len(prm.RawData))
- if !c.hasEnoughSpace(size) {
- return ErrOutOfSpace
+// put persists objects to the write-cache database and
+// pushes them to the flush workers queue.
+func (c *cache) put(obj objectInfo) error {
+ cacheSize := c.estimateCacheSize()
+ if c.maxCacheSize < c.incSizeDB(cacheSize) {
+ return writecache.ErrOutOfSpace
}
- _, err := c.fsTree.Put(ctx, prm)
- if err != nil {
- return err
+ wb := c.db.NewWriteBatch()
+ k := addr2key(obj.addr)
+ _ = wb.Set(k[:], obj.data)
+ err := wb.Flush()
+ if err == nil {
+ storagelog.Write(c.log,
+ storagelog.AddressField(obj.addr),
+ storagelog.StorageTypeField(wcStorageType),
+ storagelog.OpField("db PUT"),
+ )
+ c.objCounters.IncDB()
}
-
- storagelog.Write(ctx, c.log,
- storagelog.AddressField(prm.Address.EncodeToString()),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("fstree PUT"),
- )
- // counter changed by fstree
- c.estimateCacheSize()
-
- return nil
+ return err
}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/state.go b/pkg/local_object_storage/writecache/writecachebadger/state.go
new file mode 100644
index 000000000..994dfa3d5
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/state.go
@@ -0,0 +1,57 @@
+package writecachebadger
+
+import (
+	"fmt"
+	"math"
+	"sync/atomic"
+
+	"github.com/dgraph-io/badger/v4"
+)
+
+// estimateCacheSize returns the estimated on-disk size of the badger database
+// and publishes it to the metrics collector.
+func (c *cache) estimateCacheSize() uint64 {
+	onDiskSize, _ := c.db.EstimateSize(nil)
+	c.metrics.SetEstimateSize(onDiskSize, 0)
+	return onDiskSize
+}
+
+// incSizeDB returns the projected cache size after one more object is added,
+// using maxObjectSize as a worst-case estimate of the new object's size.
+func (c *cache) incSizeDB(sz uint64) uint64 {
+	return sz + c.maxObjectSize
+}
+
+// counters tracks the number of objects stored in the write-cache database.
+type counters struct {
+	cDB atomic.Uint64
+}
+
+func (x *counters) IncDB() {
+	x.cDB.Add(1)
+}
+
+func (x *counters) DecDB() {
+	// Adding MaxUint64 is equivalent to subtracting 1 (two's-complement wrap).
+	x.cDB.Add(math.MaxUint64)
+}
+
+func (x *counters) DB() uint64 {
+	return x.cDB.Load()
+}
+
+// initCounters restores the in-memory object counter by counting the keys
+// currently present in the database.
+func (c *cache) initCounters() error {
+	var inDB uint64
+	err := c.db.View(func(tx *badger.Txn) error {
+		opts := badger.DefaultIteratorOptions
+		// Only keys are needed for counting.
+		opts.PrefetchValues = false
+		it := tx.NewIterator(opts)
+		defer it.Close()
+		for it.Rewind(); it.Valid(); it.Next() {
+			inDB++
+		}
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("could not read write-cache DB counter: %w", err)
+	}
+
+	c.objCounters.cDB.Store(inDB)
+	c.metrics.SetActualCounters(inDB, 0)
+
+	return nil
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/storage.go b/pkg/local_object_storage/writecache/writecachebadger/storage.go
new file mode 100644
index 000000000..9ff54bee0
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/storage.go
@@ -0,0 +1,91 @@
+package writecachebadger
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/dgraph-io/badger/v4"
+ "go.uber.org/zap"
+)
+
+// store wraps the underlying badger database handle.
+// NOTE(review): the previous "in-memory LRU cache" wording described a cache
+// that this struct does not contain — only the DB handle is stored here.
+type store struct {
+	db *badger.DB
+}
+
+// internalKey is a fixed-size database key: container ID bytes followed by
+// object ID bytes.
+type internalKey [len(cid.ID{}) + len(oid.ID{})]byte
+
+// address reconstructs the object address encoded in the key.
+func (k internalKey) address() oid.Address {
+	var addr oid.Address
+	var cnr cid.ID
+	var obj oid.ID
+	copy(cnr[:], k[:len(cnr)])
+	copy(obj[:], k[len(cnr):])
+	addr.SetContainer(cnr)
+	addr.SetObject(obj)
+	return addr
+}
+
+// addr2key encodes an object address into the fixed-size database key
+// (inverse of internalKey.address).
+func addr2key(addr oid.Address) internalKey {
+	var key internalKey
+	cnr, obj := addr.Container(), addr.Object()
+	copy(key[:len(cnr)], cnr[:])
+	copy(key[len(cnr):], obj[:])
+	return key
+}
+
+// dbName is the file name of the badger database inside the cache directory.
+const dbName = "small.badger"
+
+// openStore creates the cache directory (if needed) and opens the badger
+// database, read-only when requested.
+func (c *cache) openStore(readOnly bool) error {
+	err := util.MkdirAllX(c.path, os.ModePerm)
+	if err != nil {
+		return err
+	}
+
+	c.db, err = OpenDB(filepath.Join(c.path, dbName), readOnly, c.log)
+	if err != nil {
+		return fmt.Errorf("could not open database: %w", err)
+	}
+
+	return nil
+}
+
+func (c *cache) deleteFromDB(keys []internalKey) []internalKey {
+ if len(keys) == 0 {
+ return keys
+ }
+
+ wb := c.db.NewWriteBatch()
+
+ var errorIndex int
+ for errorIndex = range keys {
+ if err := wb.Delete(keys[errorIndex][:]); err != nil {
+ break
+ }
+ }
+
+ for i := 0; i < errorIndex; i++ {
+ c.objCounters.DecDB()
+ c.metrics.Evict(writecache.StorageTypeDB)
+ storagelog.Write(c.log,
+ storagelog.AddressField(keys[i]),
+ storagelog.StorageTypeField(wcStorageType),
+ storagelog.OpField("db DELETE"),
+ )
+ }
+
+ if err := wb.Flush(); err != nil {
+ c.log.Error(logs.WritecacheCantRemoveObjectsFromTheDatabase, zap.Error(err))
+ }
+
+ copy(keys, keys[errorIndex:])
+ return keys[:len(keys)-errorIndex]
+}
diff --git a/pkg/local_object_storage/writecache/writecachebadger/util.go b/pkg/local_object_storage/writecache/writecachebadger/util.go
new file mode 100644
index 000000000..e6079e370
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebadger/util.go
@@ -0,0 +1,39 @@
+package writecachebadger
+
+import (
+	"fmt"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"github.com/dgraph-io/badger/v4"
+	badgeroptions "github.com/dgraph-io/badger/v4/options"
+)
+
+// OpenDB opens a badger instance for write-cache. Opens in read-only mode if ro is true.
+// Synchronous writes are enabled and value compression is disabled so cached
+// objects reach the disk immediately and in their original form.
+func OpenDB(p string, ro bool, l *logger.Logger) (*badger.DB, error) {
+	return badger.Open(badger.DefaultOptions(p).
+		WithReadOnly(ro).
+		WithSyncWrites(true).
+		WithCompression(badgeroptions.None).
+		WithLoggingLevel(badger.ERROR).
+		WithLogger(badgerLoggerWrapper{l}))
+}
+
+// badgerLoggerWrapper adapts the node logger to the badger.Logger interface.
+type badgerLoggerWrapper struct {
+	l *logger.Logger
+}
+
+func (w badgerLoggerWrapper) Errorf(msg string, args ...any) {
+	w.l.Error(fmt.Sprintf(msg, args...))
+}
+
+func (w badgerLoggerWrapper) Warningf(msg string, args ...any) {
+	w.l.Warn(fmt.Sprintf(msg, args...))
+}
+
+func (w badgerLoggerWrapper) Infof(msg string, args ...any) {
+	w.l.Info(fmt.Sprintf(msg, args...))
+}
+
+func (w badgerLoggerWrapper) Debugf(msg string, args ...any) {
+	w.l.Debug(fmt.Sprintf(msg, args...))
+}
diff --git a/pkg/local_object_storage/writecache/writecachebbolt/cachebbolt.go b/pkg/local_object_storage/writecache/writecachebbolt/cachebbolt.go
new file mode 100644
index 000000000..363ee8448
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebbolt/cachebbolt.go
@@ -0,0 +1,147 @@
+package writecachebbolt
+
+import (
+ "context"
+ "os"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+// cache implements writecache.Cache on top of a bbolt database for small
+// objects and an FSTree for big ones.
+type cache struct {
+	options
+
+	// mtx protects statistics, counters and compressFlags.
+	mtx sync.RWMutex
+
+	mode    mode.Mode
+	modeMtx sync.RWMutex
+
+	// compressFlags maps address of a big object to boolean value indicating
+	// whether object should be compressed.
+	compressFlags map[string]struct{}
+
+	// flushCh is a channel with objects to flush.
+	flushCh chan objectInfo
+	// closeCh is close channel, protected by modeMtx.
+	closeCh chan struct{}
+	// wg is a wait group for flush workers.
+	wg sync.WaitGroup
+	// store contains underlying database.
+	store
+	// fsTree contains big files stored directly on file-system.
+	fsTree *fstree.FSTree
+}
+
+// wcStorageType is used for write-cache operations logging.
+const wcStorageType = "write-cache"
+
+// objectInfo carries an object queued for flushing together with its
+// pre-marshaled representation and stringified address.
+type objectInfo struct {
+	addr string
+	data []byte
+	obj  *objectSDK.Object
+}
+
+const (
+	defaultMaxObjectSize   = 64 * 1024 * 1024 // 64 MiB
+	defaultSmallObjectSize = 32 * 1024        // 32 KiB
+	defaultMaxCacheSize    = 1 << 30          // 1 GiB
+)
+
+var (
+	// defaultBucket is the single bbolt bucket holding all cached objects.
+	defaultBucket = []byte{0}
+)
+
+// New creates new writecache instance.
+func New(opts ...Option) writecache.Cache {
+	c := &cache{
+		flushCh: make(chan objectInfo),
+		mode:    mode.ReadWrite,
+
+		compressFlags: make(map[string]struct{}),
+		options: options{
+			log:             &logger.Logger{Logger: zap.NewNop()},
+			maxObjectSize:   defaultMaxObjectSize,
+			smallObjectSize: defaultSmallObjectSize,
+			workersCount:    defaultFlushWorkersCount,
+			maxCacheSize:    defaultMaxCacheSize,
+			maxBatchSize:    bbolt.DefaultMaxBatchSize,
+			maxBatchDelay:   bbolt.DefaultMaxBatchDelay,
+			openFile:        os.OpenFile,
+			metrics:         writecache.DefaultMetrics(),
+		},
+	}
+
+	for i := range opts {
+		opts[i](&c.options)
+	}
+
+	return c
+}
+
+// SetLogger sets logger. It is used after the shard ID was generated to use it in logs.
+func (c *cache) SetLogger(l *logger.Logger) {
+	c.log = l
+}
+
+// DumpInfo returns static information about the write-cache (its path).
+func (c *cache) DumpInfo() writecache.Info {
+	return writecache.Info{
+		Path: c.path,
+	}
+}
+
+// Open opens and initializes database. Reads object counters from the ObjectCounters instance.
+func (c *cache) Open(_ context.Context, readOnly bool) error {
+	err := c.openStore(readOnly)
+	if err != nil {
+		return metaerr.Wrap(err)
+	}
+
+	// Opening after Close is done during maintenance mode,
+	// thus we need to create a channel here.
+	c.closeCh = make(chan struct{})
+
+	return metaerr.Wrap(c.initCounters())
+}
+
+// Init runs necessary services.
+func (c *cache) Init() error {
+	c.metrics.SetMode(c.mode)
+	c.runFlushLoop()
+	return nil
+}
+
+// Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op.
+func (c *cache) Close() error {
+	// We cannot lock mutex for the whole operation duration
+	// because it is taken by some background workers, so `wg.Wait()` is done without modeMtx.
+	c.modeMtx.Lock()
+	if c.closeCh != nil {
+		close(c.closeCh)
+	}
+	c.mode = mode.DegradedReadOnly // prevent new operations from being processed
+	c.modeMtx.Unlock()
+
+	c.wg.Wait()
+
+	c.modeMtx.Lock()
+	defer c.modeMtx.Unlock()
+
+	c.closeCh = nil
+	var err error
+	if c.db != nil {
+		err = c.db.Close()
+		// Drop the handle only when Close succeeded; the previous code nilled
+		// it on failure instead, losing the handle while reporting success.
+		if err == nil {
+			c.db = nil
+		}
+	}
+	c.metrics.Close()
+	// Propagate the close error instead of silently discarding it.
+	return metaerr.Wrap(err)
+}
diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/writecachebbolt/delete.go
similarity index 54%
rename from pkg/local_object_storage/writecache/delete.go
rename to pkg/local_object_storage/writecache/writecachebbolt/delete.go
index 94a0a40db..e4d9c219f 100644
--- a/pkg/local_object_storage/writecache/delete.go
+++ b/pkg/local_object_storage/writecache/writecachebbolt/delete.go
@@ -1,14 +1,17 @@
-package writecache
+package writecachebbolt
import (
"context"
+ "math"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -16,8 +19,6 @@ import (
// Delete removes object from write-cache.
//
// Returns an error of type apistatus.ObjectNotFound if object is missing in write-cache.
-// Returns ErrNotInitialized if write-cache has not been initialized yet.
-// Returns ErrDegraded if write-cache is in DEGRADED mode.
func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Delete",
trace.WithAttributes(
@@ -26,33 +27,62 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
defer span.End()
deleted := false
- storageType := StorageTypeUndefined
+ storageType := writecache.StorageTypeUndefined
startedAt := time.Now()
defer func() {
c.metrics.Delete(time.Since(startedAt), deleted, storageType)
}()
- if !c.modeMtx.TryRLock() {
- return ErrNotInitialized
- }
+ c.modeMtx.RLock()
defer c.modeMtx.RUnlock()
if c.readOnly() {
- return ErrReadOnly
- }
- if c.noMetabase() {
- return ErrDegraded
+ return writecache.ErrReadOnly
}
- storageType = StorageTypeFSTree
+ saddr := addr.EncodeToString()
+
+ var dataSize int
+ _ = c.db.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(defaultBucket)
+ dataSize = len(b.Get([]byte(saddr)))
+ return nil
+ })
+
+ if dataSize > 0 {
+ storageType = writecache.StorageTypeDB
+ var recordDeleted bool
+ err := c.db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(defaultBucket)
+ key := []byte(saddr)
+ recordDeleted = b.Get(key) != nil
+ err := b.Delete(key)
+ return err
+ })
+ if err != nil {
+ return err
+ }
+ storagelog.Write(c.log,
+ storagelog.AddressField(saddr),
+ storagelog.StorageTypeField(wcStorageType),
+ storagelog.OpField("db DELETE"),
+ )
+ if recordDeleted {
+ c.objCounters.cDB.Add(math.MaxUint64)
+ c.estimateCacheSize()
+ }
+ deleted = true
+ return nil
+ }
+
+ storageType = writecache.StorageTypeFSTree
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err == nil {
- storagelog.Write(ctx, c.log,
- storagelog.AddressField(addr.EncodeToString()),
+ storagelog.Write(c.log,
+ storagelog.AddressField(saddr),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
)
deleted = true
- // counter changed by fstree
c.estimateCacheSize()
}
return metaerr.Wrap(err)
diff --git a/pkg/local_object_storage/writecache/writecachebbolt/flush.go b/pkg/local_object_storage/writecache/writecachebbolt/flush.go
new file mode 100644
index 000000000..89b26cd64
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebbolt/flush.go
@@ -0,0 +1,340 @@
+package writecachebbolt
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/mr-tron/base58"
+ "github.com/nspcc-dev/neo-go/pkg/util/slice"
+ "go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+)
+
+const (
+	// flushBatchSize is amount of keys which will be read from cache to be flushed
+	// to the main storage. It is used to reduce contention between cache put
+	// and cache persist.
+	flushBatchSize = 512
+	// defaultFlushWorkersCount is number of workers for putting objects in main storage.
+	defaultFlushWorkersCount = 20
+	// defaultFlushInterval is default time interval between successive flushes.
+	defaultFlushInterval = time.Second
+)
+
+// runFlushLoop starts background workers which periodically flush objects to the blobstor.
+func (c *cache) runFlushLoop() {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	// Cancel the derived context as soon as the cache is closed.
+	ch := c.closeCh
+	c.wg.Add(1)
+	go func() {
+		<-ch
+		cancel()
+		c.wg.Done()
+	}()
+
+	// Workers draining flushCh for small (DB-stored) objects.
+	for i := 0; i < c.workersCount; i++ {
+		c.wg.Add(1)
+		go c.workerFlushSmall()
+	}
+
+	// Single worker flushing big (FSTree-stored) objects.
+	c.wg.Add(1)
+	go func() {
+		c.workerFlushBig(ctx)
+		c.wg.Done()
+	}()
+
+	// Periodic producer feeding flushCh from the database.
+	c.wg.Add(1)
+	go func() {
+		defer c.wg.Done()
+
+		tt := time.NewTimer(defaultFlushInterval)
+		defer tt.Stop()
+
+		for {
+			select {
+			case <-tt.C:
+				c.flushSmallObjects()
+				tt.Reset(defaultFlushInterval)
+			case <-c.closeCh:
+				return
+			}
+		}
+	}()
+}
+
+// flushSmallObjects reads small objects from the database in batches and
+// feeds them to the flush workers until the database is drained, the mode
+// becomes read-only, or the cache is closed.
+func (c *cache) flushSmallObjects() {
+	// lastKey remembers the iteration position between batches.
+	var lastKey []byte
+	for {
+		select {
+		case <-c.closeCh:
+			return
+		default:
+		}
+
+		var m []objectInfo
+
+		// The mode lock is held while collecting and sending one batch;
+		// note that it is released separately on every exit path below.
+		c.modeMtx.RLock()
+		if c.readOnly() {
+			c.modeMtx.RUnlock()
+			time.Sleep(time.Second)
+			continue
+		}
+
+		// We put objects in batches of fixed size to not interfere with main put cycle a lot.
+		_ = c.db.View(func(tx *bbolt.Tx) error {
+			b := tx.Bucket(defaultBucket)
+			cs := b.Cursor()
+
+			var k, v []byte
+
+			if len(lastKey) == 0 {
+				k, v = cs.First()
+			} else {
+				// Resume just after the last processed key.
+				k, v = cs.Seek(lastKey)
+				if bytes.Equal(k, lastKey) {
+					k, v = cs.Next()
+				}
+			}
+
+			for ; k != nil && len(m) < flushBatchSize; k, v = cs.Next() {
+				// Reuse lastKey's backing array when lengths match.
+				if len(lastKey) == len(k) {
+					copy(lastKey, k)
+				} else {
+					lastKey = slice.Copy(k)
+				}
+
+				m = append(m, objectInfo{
+					addr: string(k),
+					data: slice.Copy(v),
+				})
+			}
+			return nil
+		})
+
+		var count int
+		for i := range m {
+			obj := objectSDK.New()
+			if err := obj.Unmarshal(m[i].data); err != nil {
+				// Undecodable entries are skipped here; Flush reports them.
+				continue
+			}
+			m[i].obj = obj
+
+			count++
+			select {
+			case c.flushCh <- m[i]:
+			case <-c.closeCh:
+				c.modeMtx.RUnlock()
+				return
+			}
+		}
+
+		// Nothing flushable left in the database — stop this cycle.
+		if count == 0 {
+			c.modeMtx.RUnlock()
+			break
+		}
+
+		c.modeMtx.RUnlock()
+
+		c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache,
+			zap.Int("count", count),
+			zap.String("start", base58.Encode(lastKey)))
+	}
+}
+
+// workerFlushBig periodically flushes the FSTree part of the cache to the
+// main storage until the cache is closed.
+func (c *cache) workerFlushBig(ctx context.Context) {
+	tick := time.NewTicker(defaultFlushInterval * 10)
+	// Stop the ticker on exit; the original leaked it for the process lifetime.
+	defer tick.Stop()
+	for {
+		select {
+		case <-tick.C:
+			c.modeMtx.RLock()
+			if c.readOnly() {
+				c.modeMtx.RUnlock()
+				break
+			}
+
+			// Errors are best-effort here (ignoreErrors=true); they are
+			// reported inside flushFSTree.
+			_ = c.flushFSTree(ctx, true)
+
+			c.modeMtx.RUnlock()
+		case <-c.closeCh:
+			return
+		}
+	}
+}
+
+// reportFlushError forwards a flush error to the configured callback or,
+// when none is set, logs it together with the object address.
+func (c *cache) reportFlushError(msg string, addr string, err error) {
+	if c.reportError != nil {
+		c.reportError(msg, err)
+	} else {
+		c.log.Error(msg,
+			zap.String("address", addr),
+			zap.Error(err))
+	}
+}
+
+// flushFSTree iterates over all objects stored in the FSTree, writes them to
+// the main storage and removes the flushed files. With ignoreErrors set,
+// per-object failures are reported but do not stop the iteration.
+func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
+	var prm common.IteratePrm
+	prm.IgnoreErrors = ignoreErrors
+	prm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error {
+		sAddr := addr.EncodeToString()
+
+		data, err := f()
+		if err != nil {
+			c.reportFlushError(logs.FSTreeCantReadFile, sAddr, metaerr.Wrap(err))
+			if ignoreErrors {
+				return nil
+			}
+			return err
+		}
+
+		var obj objectSDK.Object
+		err = obj.Unmarshal(data)
+		if err != nil {
+			c.reportFlushError(logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
+			if ignoreErrors {
+				return nil
+			}
+			return err
+		}
+
+		err = c.flushObject(ctx, &obj, data, writecache.StorageTypeFSTree)
+		if err != nil {
+			if ignoreErrors {
+				return nil
+			}
+			return err
+		}
+
+		// Remove the file only after a successful flush.
+		c.deleteFromDisk(ctx, []string{sAddr})
+		return nil
+	}
+
+	_, err := c.fsTree.Iterate(ctx, prm)
+	return err
+}
+
+// workerFlushSmall writes small objects to the main storage.
+func (c *cache) workerFlushSmall() {
+	defer c.wg.Done()
+
+	var objInfo objectInfo
+	for {
+		// Give priority to direct put.
+		select {
+		case objInfo = <-c.flushCh:
+		case <-c.closeCh:
+			return
+		}
+
+		err := c.flushObject(context.TODO(), objInfo.obj, objInfo.data, writecache.StorageTypeDB)
+		if err != nil {
+			// Error is handled in flushObject.
+			continue
+		}
+
+		// The object is safely in the main storage — drop the cached copy.
+		c.deleteFromDB(objInfo.addr)
+	}
+}
+
+// flushObject is used to write object directly to the main storage.
+// On success the metabase storage ID is updated to point at the new location.
+func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []byte, st writecache.StorageType) error {
+	var err error
+
+	defer func() {
+		c.metrics.Flush(err == nil, st)
+	}()
+
+	addr := objectCore.AddressOf(obj)
+
+	var prm common.PutPrm
+	prm.Object = obj
+	prm.RawData = data
+
+	res, err := c.blobstor.Put(ctx, prm)
+	if err != nil {
+		// Out-of-space / read-only / no-place conditions are expected and
+		// will be retried later, so they are not reported as flush errors.
+		if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
+			!errors.Is(err, blobstor.ErrNoPlaceFound) {
+			c.reportFlushError(logs.FSTreeCantFushObjectBlobstor,
+				addr.EncodeToString(), err)
+		}
+		return err
+	}
+
+	var updPrm meta.UpdateStorageIDPrm
+	updPrm.SetAddress(addr)
+	updPrm.SetStorageID(res.StorageID)
+
+	_, err = c.metabase.UpdateStorageID(updPrm)
+	if err != nil {
+		c.reportFlushError(logs.FSTreeCantUpdateID,
+			addr.EncodeToString(), err)
+	}
+	return err
+}
+
+// Flush flushes all objects from the write-cache to the main storage.
+// Write-cache must be in readonly mode to ensure correctness of an operation and
+// to prevent interference with background flush workers.
+func (c *cache) Flush(ctx context.Context, ignoreErrors bool) error {
+	ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Flush",
+		trace.WithAttributes(
+			attribute.Bool("ignore_errors", ignoreErrors),
+		))
+	defer span.End()
+
+	c.modeMtx.RLock()
+	defer c.modeMtx.RUnlock()
+
+	return c.flush(ctx, ignoreErrors)
+}
+
+// flush writes out the FSTree part first, then every object from the bbolt
+// database. Must be called with modeMtx taken; DB entries are not removed here.
+func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
+	if err := c.flushFSTree(ctx, ignoreErrors); err != nil {
+		return err
+	}
+
+	return c.db.View(func(tx *bbolt.Tx) error {
+		var addr oid.Address
+
+		b := tx.Bucket(defaultBucket)
+		cs := b.Cursor()
+		for k, data := cs.Seek(nil); k != nil; k, data = cs.Next() {
+			// Keys are stringified object addresses (cf. objectInfo.addr).
+			sa := string(k)
+			if err := addr.DecodeString(sa); err != nil {
+				c.reportFlushError(logs.FSTreeCantDecodeDBObjectAddress, sa, metaerr.Wrap(err))
+				if ignoreErrors {
+					continue
+				}
+				return err
+			}
+
+			var obj objectSDK.Object
+			if err := obj.Unmarshal(data); err != nil {
+				c.reportFlushError(logs.FSTreeCantDecodeDBObjectAddress, sa, metaerr.Wrap(err))
+				if ignoreErrors {
+					continue
+				}
+				return err
+			}
+
+			if err := c.flushObject(ctx, &obj, data, writecache.StorageTypeDB); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
diff --git a/pkg/local_object_storage/writecache/writecachebbolt/flush_test.go b/pkg/local_object_storage/writecache/writecachebbolt/flush_test.go
new file mode 100644
index 000000000..533cec1d2
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebbolt/flush_test.go
@@ -0,0 +1,108 @@
+package writecachebbolt
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "sync/atomic"
+ "testing"
+
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachetest"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/stretchr/testify/require"
+ "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+func TestFlush(t *testing.T) {
+ testlogger := test.NewLogger(t, true)
+
+ createCacheFn := func(t *testing.T, smallSize uint64, mb *meta.DB, bs writecache.MainStorage, opts ...Option) writecache.Cache {
+ return New(
+ append([]Option{
+ WithLogger(testlogger),
+ WithPath(filepath.Join(t.TempDir(), "writecache")),
+ WithSmallObjectSize(smallSize),
+ WithMetabase(mb),
+ WithBlobstor(bs),
+ }, opts...)...)
+ }
+
+ errCountOpt := func() (Option, *atomic.Uint32) {
+ cnt := &atomic.Uint32{}
+ return WithReportErrorFunc(func(msg string, err error) {
+ cnt.Add(1)
+ testlogger.Warn(msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
+ }), cnt
+ }
+
+ failures := []writecachetest.TestFailureInjector[Option]{
+ {
+ Desc: "db, invalid address",
+ InjectFn: func(t *testing.T, wc writecache.Cache) {
+ c := wc.(*cache)
+ obj := testutil.GenerateObject()
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(defaultBucket)
+ return b.Put([]byte{1, 2, 3}, data)
+ }))
+ },
+ },
+ {
+ Desc: "db, invalid object",
+ InjectFn: func(t *testing.T, wc writecache.Cache) {
+ c := wc.(*cache)
+ require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(defaultBucket)
+ k := []byte(oidtest.Address().EncodeToString())
+ v := []byte{1, 2, 3}
+ return b.Put(k, v)
+ }))
+ },
+ },
+ {
+ Desc: "fs, read error",
+ InjectFn: func(t *testing.T, wc writecache.Cache) {
+ c := wc.(*cache)
+ obj := testutil.GenerateObject()
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+
+ var prm common.PutPrm
+ prm.Address = objectCore.AddressOf(obj)
+ prm.RawData = data
+
+ _, err = c.fsTree.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ p := prm.Address.Object().EncodeToString() + "." + prm.Address.Container().EncodeToString()
+ p = filepath.Join(c.fsTree.RootPath, p[:1], p[1:])
+
+ _, err = os.Stat(p) // sanity check
+ require.NoError(t, err)
+ require.NoError(t, os.Truncate(p, 0)) // corrupt the file contents, so that it can't be unmarshalled
+ },
+ },
+ {
+ Desc: "fs, invalid object",
+ InjectFn: func(t *testing.T, wc writecache.Cache) {
+ c := wc.(*cache)
+ var prm common.PutPrm
+ prm.Address = oidtest.Address()
+ prm.RawData = []byte{1, 2, 3}
+ _, err := c.fsTree.Put(context.Background(), prm)
+ require.NoError(t, err)
+ },
+ },
+ }
+
+ writecachetest.TestFlush(t, createCacheFn, errCountOpt, failures...)
+}
diff --git a/pkg/local_object_storage/writecache/generic_test.go b/pkg/local_object_storage/writecache/writecachebbolt/generic_test.go
similarity index 84%
rename from pkg/local_object_storage/writecache/generic_test.go
rename to pkg/local_object_storage/writecache/writecachebbolt/generic_test.go
index 2913ed13b..7eadd1afc 100644
--- a/pkg/local_object_storage/writecache/generic_test.go
+++ b/pkg/local_object_storage/writecache/writecachebbolt/generic_test.go
@@ -1,4 +1,4 @@
-package writecache
+package writecachebbolt
import (
"testing"
@@ -10,7 +10,7 @@ import (
func TestGeneric(t *testing.T) {
storagetest.TestAll(t, func(t *testing.T) storagetest.Component {
return New(
- WithLogger(test.NewLogger(t)),
+ WithLogger(test.NewLogger(t, true)),
WithFlushWorkersCount(2),
WithPath(t.TempDir()))
})
diff --git a/pkg/local_object_storage/writecache/get.go b/pkg/local_object_storage/writecache/writecachebbolt/get.go
similarity index 77%
rename from pkg/local_object_storage/writecache/get.go
rename to pkg/local_object_storage/writecache/writecachebbolt/get.go
index c0847a65f..dede1fece 100644
--- a/pkg/local_object_storage/writecache/get.go
+++ b/pkg/local_object_storage/writecache/writecachebbolt/get.go
@@ -1,17 +1,18 @@
-package writecache
+package writecachebbolt
import (
- "bytes"
"context"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -29,33 +30,33 @@ func (c *cache) Get(ctx context.Context, addr oid.Address) (*objectSDK.Object, e
))
defer span.End()
- if !c.modeMtx.TryRLock() {
- return nil, ErrNotInitialized
- }
- defer c.modeMtx.RUnlock()
- if c.mode.NoMetabase() {
- return nil, ErrDegraded
- }
-
- obj, err := c.getInternal(ctx, addr)
+ obj, err := c.getInternal(ctx, saddr, addr)
return obj, metaerr.Wrap(err)
}
-func (c *cache) getInternal(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+func (c *cache) getInternal(ctx context.Context, saddr string, addr oid.Address) (*objectSDK.Object, error) {
found := false
- storageType := StorageTypeUndefined
+ storageType := writecache.StorageTypeUndefined
startedAt := time.Now()
defer func() {
c.metrics.Get(time.Since(startedAt), found, storageType)
}()
+ value, err := Get(c.db, []byte(saddr))
+ if err == nil {
+ obj := objectSDK.New()
+ found = true
+ storageType = writecache.StorageTypeDB
+ return obj, obj.Unmarshal(value)
+ }
+
res, err := c.fsTree.Get(ctx, common.GetPrm{Address: addr})
if err != nil {
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
found = true
- storageType = StorageTypeFSTree
+ storageType = writecache.StorageTypeFSTree
return res.Object, nil
}
@@ -65,21 +66,13 @@ func (c *cache) getInternal(ctx context.Context, addr oid.Address) (*objectSDK.O
func (c *cache) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
saddr := addr.EncodeToString()
- ctx, span := tracing.StartSpanFromContext(ctx, "Head",
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Head",
trace.WithAttributes(
attribute.String("address", saddr),
))
defer span.End()
- if !c.modeMtx.TryRLock() {
- return nil, ErrNotInitialized
- }
- defer c.modeMtx.RUnlock()
- if c.mode.NoMetabase() {
- return nil, ErrDegraded
- }
-
- obj, err := c.getInternal(ctx, addr)
+ obj, err := c.getInternal(ctx, saddr, addr)
if err != nil {
return nil, metaerr.Wrap(err)
}
@@ -92,9 +85,6 @@ func (c *cache) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object,
//
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in db.
func Get(db *bbolt.DB, key []byte) ([]byte, error) {
- if db == nil {
- return nil, ErrNotInitialized
- }
var value []byte
err := db.View(func(tx *bbolt.Tx) error {
b := tx.Bucket(defaultBucket)
@@ -105,7 +95,7 @@ func Get(db *bbolt.DB, key []byte) ([]byte, error) {
if value == nil {
return logicerr.Wrap(new(apistatus.ObjectNotFound))
}
- value = bytes.Clone(value)
+ value = slice.Copy(value)
return nil
})
return value, metaerr.Wrap(err)
diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/writecachebbolt/iterate.go
similarity index 87%
rename from pkg/local_object_storage/writecache/iterate.go
rename to pkg/local_object_storage/writecache/writecachebbolt/iterate.go
index e369fbd50..530db42a6 100644
--- a/pkg/local_object_storage/writecache/iterate.go
+++ b/pkg/local_object_storage/writecache/writecachebbolt/iterate.go
@@ -1,4 +1,4 @@
-package writecache
+package writecachebbolt
import (
"errors"
@@ -27,10 +27,10 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
var addr oid.Address
- return b.ForEach(func(k, _ []byte) error {
+ return b.ForEach(func(k, v []byte) error {
err := addr.DecodeString(string(k))
if err != nil {
- return fmt.Errorf("parse object address: %w", err)
+ return fmt.Errorf("could not parse object address: %w", err)
}
return f(addr)
diff --git a/pkg/local_object_storage/writecache/writecachebbolt/mode.go b/pkg/local_object_storage/writecache/writecachebbolt/mode.go
new file mode 100644
index 000000000..b187996a1
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebbolt/mode.go
@@ -0,0 +1,78 @@
+package writecachebbolt
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// SetMode sets write-cache mode of operation.
+// When shard is put in read-only mode all objects in memory are flushed to disk
+// and all background jobs are suspended.
+func (c *cache) SetMode(m mode.Mode) error {
+ ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.SetMode",
+ trace.WithAttributes(
+ attribute.String("mode", m.String()),
+ ))
+ defer span.End()
+
+ c.modeMtx.Lock()
+ defer c.modeMtx.Unlock()
+
+ err := c.setMode(ctx, m)
+ if err == nil {
+ c.metrics.SetMode(m)
+ }
+ return err
+}
+
+// setMode applies new mode. Must be called with cache.modeMtx lock taken.
+func (c *cache) setMode(ctx context.Context, m mode.Mode) error {
+ var err error
+ turnOffMeta := m.NoMetabase()
+
+ if turnOffMeta && !c.mode.NoMetabase() {
+ err = c.flush(ctx, true)
+ if err != nil {
+ return err
+ }
+ }
+
+ if c.db != nil {
+ if err = c.db.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache database: %w", err)
+ }
+ }
+
+ // Suspend producers to ensure there are channel send operations in fly.
+ // flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
+ // guarantees that there are no in-fly operations.
+ for len(c.flushCh) != 0 {
+ c.log.Info(logs.WritecacheWaitingForChannelsToFlush)
+ time.Sleep(time.Second)
+ }
+
+ if turnOffMeta {
+ c.mode = m
+ return nil
+ }
+
+ if err = c.openStore(m.ReadOnly()); err != nil {
+ return err
+ }
+
+ c.mode = m
+ return nil
+}
+
+// readOnly returns true if current mode is read-only.
+// `c.modeMtx` must be taken.
+func (c *cache) readOnly() bool {
+ return c.mode.ReadOnly()
+}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/writecachebbolt/options.go
similarity index 57%
rename from pkg/local_object_storage/writecache/options.go
rename to pkg/local_object_storage/writecache/writecachebbolt/options.go
index a4f98ad06..d8eedfc79 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/writecachebbolt/options.go
@@ -1,10 +1,13 @@
-package writecache
+package writecachebbolt
import (
- "context"
+ "io/fs"
+ "os"
+ "time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
)
// Option represents write-cache configuration option.
@@ -15,37 +18,38 @@ type options struct {
// path is a path to a directory for write-cache.
path string
// blobstor is the main persistent storage.
- blobstor MainStorage
+ blobstor writecache.MainStorage
// metabase is the metabase instance.
- metabase Metabase
+ metabase writecache.Metabase
// maxObjectSize is the maximum size of the object stored in the write-cache.
maxObjectSize uint64
+ // smallObjectSize is the maximum size of the object stored in the database.
+ smallObjectSize uint64
// workersCount is the number of workers flushing objects in parallel.
workersCount int
// maxCacheSize is the maximum total size of all objects saved in cache (DB + FS).
// 1 GiB by default.
maxCacheSize uint64
- // maxCacheCount is the maximum total count of all object saved in cache.
- // 0 (no limit) by default.
- maxCacheCount uint64
+ // objCounters contains atomic counters for the number of objects stored in cache.
+ objCounters counters
+ // maxBatchSize is the maximum batch size for the small object database.
+ maxBatchSize int
+ // maxBatchDelay is the maximum batch wait time for the small object database.
+ maxBatchDelay time.Duration
// noSync is true iff FSTree allows unsynchronized writes.
noSync bool
// reportError is the function called when encountering disk errors in background workers.
- reportError func(context.Context, string, error)
+ reportError func(string, error)
+ // openFile is the function called internally by bbolt to open database files. Useful for hermetic testing.
+ openFile func(string, int, fs.FileMode) (*os.File, error)
// metrics is metrics implementation
- metrics Metrics
- // disableBackgroundFlush is for testing purposes only.
- disableBackgroundFlush bool
- // flushSizeLimit is total size of flushing objects.
- flushSizeLimit uint64
- // qosLimiter used to limit flush RPS.
- qosLimiter qos.Limiter
+ metrics writecache.Metrics
}
// WithLogger sets logger.
func WithLogger(log *logger.Logger) Option {
return func(o *options) {
- o.log = log
+ o.log = &logger.Logger{Logger: log.With(zap.String("component", "WriteCache"))}
}
}
@@ -57,14 +61,14 @@ func WithPath(path string) Option {
}
// WithBlobstor sets main object storage.
-func WithBlobstor(bs MainStorage) Option {
+func WithBlobstor(bs writecache.MainStorage) Option {
return func(o *options) {
o.blobstor = bs
}
}
// WithMetabase sets metabase.
-func WithMetabase(db Metabase) Option {
+func WithMetabase(db writecache.Metabase) Option {
return func(o *options) {
o.metabase = db
}
@@ -79,6 +83,15 @@ func WithMaxObjectSize(sz uint64) Option {
}
}
+// WithSmallObjectSize sets maximum object size to be stored in write-cache.
+func WithSmallObjectSize(sz uint64) Option {
+ return func(o *options) {
+ if sz > 0 {
+ o.smallObjectSize = sz
+ }
+ }
+}
+
func WithFlushWorkersCount(c int) Option {
return func(o *options) {
if c > 0 {
@@ -94,10 +107,21 @@ func WithMaxCacheSize(sz uint64) Option {
}
}
-// WithMaxCacheCount sets maximum write-cache objects count.
-func WithMaxCacheCount(v uint64) Option {
+// WithMaxBatchSize sets max batch size for the small object database.
+func WithMaxBatchSize(sz int) Option {
return func(o *options) {
- o.maxCacheCount = v
+ if sz > 0 {
+ o.maxBatchSize = sz
+ }
+ }
+}
+
+// WithMaxBatchDelay sets max batch delay for the small object database.
+func WithMaxBatchDelay(d time.Duration) Option {
+ return func(o *options) {
+ if d > 0 {
+ o.maxBatchDelay = d
+ }
}
}
@@ -112,35 +136,22 @@ func WithNoSync(noSync bool) Option {
}
// WithReportErrorFunc sets error reporting function.
-func WithReportErrorFunc(f func(context.Context, string, error)) Option {
+func WithReportErrorFunc(f func(string, error)) Option {
return func(o *options) {
o.reportError = f
}
}
+// WithOpenFile sets the OpenFile function to use internally by bolt. Useful for hermetic testing.
+func WithOpenFile(f func(string, int, fs.FileMode) (*os.File, error)) Option {
+ return func(o *options) {
+ o.openFile = f
+ }
+}
+
// WithMetrics sets metrics implementation.
-func WithMetrics(metrics Metrics) Option {
+func WithMetrics(metrics writecache.Metrics) Option {
return func(o *options) {
o.metrics = metrics
}
}
-
-// WithDisableBackgroundFlush disables background flush, for testing purposes only.
-func WithDisableBackgroundFlush() Option {
- return func(o *options) {
- o.disableBackgroundFlush = true
- }
-}
-
-// WithFlushSizeLimit sets flush size limit.
-func WithFlushSizeLimit(v uint64) Option {
- return func(o *options) {
- o.flushSizeLimit = v
- }
-}
-
-func WithQoSLimiter(l qos.Limiter) Option {
- return func(o *options) {
- o.qosLimiter = l
- }
-}
diff --git a/pkg/local_object_storage/writecache/writecachebbolt/put.go b/pkg/local_object_storage/writecache/writecachebbolt/put.go
new file mode 100644
index 000000000..1d9957219
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebbolt/put.go
@@ -0,0 +1,137 @@
+package writecachebbolt
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+var (
+ // ErrBigObject is returned when object is too big to be placed in cache.
+ ErrBigObject = errors.New("too big object")
+ // ErrOutOfSpace is returned when there is no space left to put a new object.
+ ErrOutOfSpace = errors.New("no space left in the write cache")
+)
+
+// Put puts object to write-cache.
+//
+// Returns ErrReadOnly if write-cache is in R/O mode.
+// Returns ErrNotInitialized if write-cache has not been initialized yet.
+// Returns ErrOutOfSpace if saving an object leads to WC's size overflow.
+// Returns ErrBigObject if an objects exceeds maximum object size.
+func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Put",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("dont_compress", prm.DontCompress),
+ ))
+ defer span.End()
+
+ startedAt := time.Now()
+ added := false
+ storageType := writecache.StorageTypeUndefined
+ defer func() {
+ c.metrics.Put(time.Since(startedAt), added, storageType)
+ }()
+
+ c.modeMtx.RLock()
+ defer c.modeMtx.RUnlock()
+ if c.readOnly() {
+ return common.PutRes{}, writecache.ErrReadOnly
+ }
+
+ sz := uint64(len(prm.RawData))
+ if sz > c.maxObjectSize {
+ return common.PutRes{}, ErrBigObject
+ }
+
+ oi := objectInfo{
+ addr: prm.Address.EncodeToString(),
+ obj: prm.Object,
+ data: prm.RawData,
+ }
+
+ if sz <= c.smallObjectSize {
+ storageType = writecache.StorageTypeDB
+ err := c.putSmall(oi)
+ if err == nil {
+ added = true
+ }
+ return common.PutRes{}, err
+ }
+
+ storageType = writecache.StorageTypeFSTree
+ err := c.putBig(ctx, oi.addr, prm)
+ if err == nil {
+ added = true
+ }
+ return common.PutRes{}, metaerr.Wrap(err)
+}
+
+// putSmall persists small objects to the write-cache database and
+// pushes the to the flush workers queue.
+func (c *cache) putSmall(obj objectInfo) error {
+ cacheSize := c.estimateCacheSize()
+ if c.maxCacheSize < c.incSizeDB(cacheSize) {
+ return ErrOutOfSpace
+ }
+
+ var newRecord bool
+ err := c.db.Batch(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(defaultBucket)
+ key := []byte(obj.addr)
+ newRecord = b.Get(key) == nil
+ if newRecord {
+ return b.Put(key, obj.data)
+ }
+ return nil
+ })
+ if err == nil {
+ storagelog.Write(c.log,
+ storagelog.AddressField(obj.addr),
+ storagelog.StorageTypeField(wcStorageType),
+ storagelog.OpField("db PUT"),
+ )
+ if newRecord {
+ c.objCounters.cDB.Add(1)
+ c.estimateCacheSize()
+ }
+ }
+ return err
+}
+
+// putBig writes object to FSTree and pushes it to the flush workers queue.
+func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) error {
+ cacheSz := c.estimateCacheSize()
+ if c.maxCacheSize < c.incSizeFS(cacheSz) {
+ return ErrOutOfSpace
+ }
+
+ _, err := c.fsTree.Put(ctx, prm)
+ if err != nil {
+ return err
+ }
+
+ if compressor := c.blobstor.Compressor(); compressor != nil && compressor.NeedsCompression(prm.Object) {
+ c.mtx.Lock()
+ c.compressFlags[addr] = struct{}{}
+ c.mtx.Unlock()
+ }
+ storagelog.Write(c.log,
+ storagelog.AddressField(addr),
+ storagelog.StorageTypeField(wcStorageType),
+ storagelog.OpField("fstree PUT"),
+ )
+ c.estimateCacheSize()
+
+ return nil
+}
diff --git a/pkg/local_object_storage/writecache/writecachebbolt/state.go b/pkg/local_object_storage/writecache/writecachebbolt/state.go
new file mode 100644
index 000000000..91d54b0ea
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebbolt/state.go
@@ -0,0 +1,76 @@
+package writecachebbolt
+
+import (
+ "fmt"
+ "math"
+ "sync/atomic"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "go.etcd.io/bbolt"
+)
+
+func (c *cache) estimateCacheSize() uint64 {
+ dbCount := c.objCounters.DB()
+ fsCount := c.objCounters.FS()
+ if fsCount > 0 {
+ fsCount-- //db file
+ }
+ dbSize := dbCount * c.smallObjectSize
+ fsSize := fsCount * c.maxObjectSize
+ c.metrics.SetEstimateSize(dbSize, fsSize)
+ c.metrics.SetActualCounters(dbCount, fsCount)
+ return dbSize + fsSize
+}
+
+func (c *cache) incSizeDB(sz uint64) uint64 {
+ return sz + c.smallObjectSize
+}
+
+func (c *cache) incSizeFS(sz uint64) uint64 {
+ return sz + c.maxObjectSize
+}
+
+var _ fstree.FileCounter = &counters{}
+
+type counters struct {
+ cDB, cFS atomic.Uint64
+}
+
+func (x *counters) DB() uint64 {
+ return x.cDB.Load()
+}
+
+func (x *counters) FS() uint64 {
+ return x.cFS.Load()
+}
+
+// Set implements fstree.ObjectCounter.
+func (x *counters) Set(v uint64) {
+ x.cFS.Store(v)
+}
+
+// Inc implements fstree.ObjectCounter.
+func (x *counters) Inc() {
+ x.cFS.Add(1)
+}
+
+// Dec implements fstree.ObjectCounter.
+func (x *counters) Dec() {
+ x.cFS.Add(math.MaxUint64)
+}
+
+func (c *cache) initCounters() error {
+ var inDB uint64
+ err := c.db.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(defaultBucket)
+ if b != nil {
+ inDB = uint64(b.Stats().KeyN)
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("could not read write-cache DB counter: %w", err)
+ }
+ c.objCounters.cDB.Store(inDB)
+ return nil
+}
diff --git a/pkg/local_object_storage/writecache/writecachebbolt/storage.go b/pkg/local_object_storage/writecache/writecachebbolt/storage.go
new file mode 100644
index 000000000..bbd66d57c
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebbolt/storage.go
@@ -0,0 +1,130 @@
+package writecachebbolt
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "os"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+// store represents persistent storage with in-memory LRU cache
+// for flushed items on top of it.
+type store struct {
+ db *bbolt.DB
+}
+
+const dbName = "small.bolt"
+
+func (c *cache) openStore(readOnly bool) error {
+ err := util.MkdirAllX(c.path, os.ModePerm)
+ if err != nil {
+ return err
+ }
+
+ c.db, err = OpenDB(c.path, readOnly, c.openFile)
+ if err != nil {
+ return fmt.Errorf("could not open database: %w", err)
+ }
+
+ c.db.MaxBatchSize = c.maxBatchSize
+ c.db.MaxBatchDelay = c.maxBatchDelay
+
+ if !readOnly {
+ err = c.db.Update(func(tx *bbolt.Tx) error {
+ _, err := tx.CreateBucketIfNotExists(defaultBucket)
+ return err
+ })
+ if err != nil {
+ return fmt.Errorf("could not create default bucket: %w", err)
+ }
+ }
+
+ c.fsTree = fstree.New(
+ fstree.WithPath(c.path),
+ fstree.WithPerm(os.ModePerm),
+ fstree.WithDepth(1),
+ fstree.WithDirNameLen(1),
+ fstree.WithNoSync(c.noSync),
+ fstree.WithFileCounter(&c.objCounters),
+ )
+ if err := c.fsTree.Open(readOnly); err != nil {
+ return fmt.Errorf("could not open FSTree: %w", err)
+ }
+ if err := c.fsTree.Init(); err != nil {
+ return fmt.Errorf("could not init FSTree: %w", err)
+ }
+
+ return nil
+}
+
+func (c *cache) deleteFromDB(key string) {
+ var recordDeleted bool
+ err := c.db.Batch(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(defaultBucket)
+ key := []byte(key)
+ recordDeleted = !recordDeleted && b.Get(key) != nil
+ return b.Delete(key)
+ })
+
+ if err == nil {
+ c.metrics.Evict(writecache.StorageTypeDB)
+ storagelog.Write(c.log,
+ storagelog.AddressField(key),
+ storagelog.StorageTypeField(wcStorageType),
+ storagelog.OpField("db DELETE"),
+ )
+ if recordDeleted {
+ c.objCounters.cDB.Add(math.MaxUint64)
+ c.estimateCacheSize()
+ }
+ } else {
+ c.log.Error(logs.WritecacheCantRemoveObjectsFromTheDatabase, zap.Error(err))
+ }
+}
+
+func (c *cache) deleteFromDisk(ctx context.Context, keys []string) []string {
+ if len(keys) == 0 {
+ return keys
+ }
+
+ var copyIndex int
+ var addr oid.Address
+
+ for i := range keys {
+ if err := addr.DecodeString(keys[i]); err != nil {
+ c.log.Error(logs.WritecacheCantParseAddress, zap.String("address", keys[i]))
+ continue
+ }
+
+ _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
+ if err != nil && !client.IsErrObjectNotFound(err) {
+ c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
+
+ // Save the key for the next iteration.
+ keys[copyIndex] = keys[i]
+ copyIndex++
+ continue
+ } else if err == nil {
+ storagelog.Write(c.log,
+ storagelog.AddressField(keys[i]),
+ storagelog.StorageTypeField(wcStorageType),
+ storagelog.OpField("fstree DELETE"),
+ )
+ c.metrics.Evict(writecache.StorageTypeFSTree)
+ c.estimateCacheSize()
+ }
+ }
+
+ return keys[:copyIndex]
+}
diff --git a/pkg/local_object_storage/writecache/writecachebbolt/util.go b/pkg/local_object_storage/writecache/writecachebbolt/util.go
new file mode 100644
index 000000000..fe225583c
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachebbolt/util.go
@@ -0,0 +1,20 @@
+package writecachebbolt
+
+import (
+ "io/fs"
+ "os"
+ "path/filepath"
+ "time"
+
+ "go.etcd.io/bbolt"
+)
+
+// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
+func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
+ return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
+ NoFreelistSync: true,
+ ReadOnly: ro,
+ Timeout: 100 * time.Millisecond,
+ OpenFile: openFile,
+ })
+}
diff --git a/pkg/local_object_storage/writecache/writecachetest/flush.go b/pkg/local_object_storage/writecache/writecachetest/flush.go
new file mode 100644
index 000000000..6911344a2
--- /dev/null
+++ b/pkg/local_object_storage/writecache/writecachetest/flush.go
@@ -0,0 +1,185 @@
+package writecachetest
+
+import (
+ "context"
+ "path/filepath"
+ "sync/atomic"
+ "testing"
+
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ objCount = 4
+ smallSize = 256
+)
+
+type CreateCacheFunc[Option any] func(
+ t *testing.T,
+ smallSize uint64,
+ meta *meta.DB,
+ bs writecache.MainStorage,
+ opts ...Option,
+) writecache.Cache
+
+type TestFailureInjector[Option any] struct {
+ Desc string
+ InjectFn func(*testing.T, writecache.Cache)
+}
+
+type objectPair struct {
+ addr oid.Address
+ obj *objectSDK.Object
+}
+
+func TestFlush[Option any](
+ t *testing.T,
+ createCacheFn CreateCacheFunc[Option],
+ errCountOption func() (Option, *atomic.Uint32),
+ failures ...TestFailureInjector[Option],
+) {
+ t.Run("no errors", func(t *testing.T) {
+ wc, bs, mb := newCache(t, createCacheFn, smallSize)
+ objects := putObjects(t, wc)
+
+ require.NoError(t, bs.SetMode(mode.ReadWrite))
+ require.NoError(t, mb.SetMode(mode.ReadWrite))
+
+ require.NoError(t, wc.Flush(context.Background(), false))
+
+ check(t, mb, bs, objects)
+ })
+
+ t.Run("flush on moving to degraded mode", func(t *testing.T) {
+ wc, bs, mb := newCache(t, createCacheFn, smallSize)
+ objects := putObjects(t, wc)
+
+ // Blobstor is read-only, so we expect en error from `flush` here.
+ require.Error(t, wc.SetMode(mode.Degraded))
+
+ // First move to read-only mode to close background workers.
+ require.NoError(t, wc.SetMode(mode.ReadOnly))
+ require.NoError(t, bs.SetMode(mode.ReadWrite))
+ require.NoError(t, mb.SetMode(mode.ReadWrite))
+ require.NoError(t, wc.SetMode(mode.Degraded))
+
+ check(t, mb, bs, objects)
+ })
+
+ t.Run("ignore errors", func(t *testing.T) {
+ for _, f := range failures {
+ f := f
+ t.Run(f.Desc, func(t *testing.T) {
+ errCountOpt, errCount := errCountOption()
+ wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)
+ objects := putObjects(t, wc)
+ f.InjectFn(t, wc)
+
+ require.NoError(t, wc.SetMode(mode.ReadOnly))
+ require.NoError(t, bs.SetMode(mode.ReadWrite))
+ require.NoError(t, mb.SetMode(mode.ReadWrite))
+
+ require.Equal(t, uint32(0), errCount.Load())
+ require.Error(t, wc.Flush(context.Background(), false))
+ require.True(t, errCount.Load() > 0)
+ require.NoError(t, wc.Flush(context.Background(), true))
+
+ check(t, mb, bs, objects)
+ })
+ }
+ })
+}
+
+func newCache[Option any](
+ t *testing.T,
+ createCacheFn CreateCacheFunc[Option],
+ smallSize uint64,
+ opts ...Option,
+) (writecache.Cache, *blobstor.BlobStor, *meta.DB) {
+ dir := t.TempDir()
+ mb := meta.New(
+ meta.WithPath(filepath.Join(dir, "meta")),
+ meta.WithEpochState(dummyEpoch{}))
+ require.NoError(t, mb.Open(context.Background(), false))
+ require.NoError(t, mb.Init())
+
+ bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{
+ {
+ Storage: fstree.New(
+ fstree.WithPath(filepath.Join(dir, "blob")),
+ fstree.WithDepth(0),
+ fstree.WithDirNameLen(1)),
+ },
+ }))
+ require.NoError(t, bs.Open(context.Background(), false))
+ require.NoError(t, bs.Init())
+
+ wc := createCacheFn(t, smallSize, mb, bs, opts...)
+ t.Cleanup(func() { require.NoError(t, wc.Close()) })
+ require.NoError(t, wc.Open(context.Background(), false))
+ require.NoError(t, wc.Init())
+
+ // First set mode for metabase and blobstor to prevent background flushes.
+ require.NoError(t, mb.SetMode(mode.ReadOnly))
+ require.NoError(t, bs.SetMode(mode.ReadOnly))
+
+ return wc, bs, mb
+}
+
+func putObject(t *testing.T, c writecache.Cache, size int) objectPair {
+ obj := testutil.GenerateObjectWithSize(size)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+
+ var prm common.PutPrm
+ prm.Address = objectCore.AddressOf(obj)
+ prm.Object = obj
+ prm.RawData = data
+
+ _, err = c.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ return objectPair{prm.Address, prm.Object}
+}
+
+func putObjects(t *testing.T, c writecache.Cache) []objectPair {
+ objects := make([]objectPair, objCount)
+ for i := range objects {
+ objects[i] = putObject(t, c, 1+(i%2)*smallSize)
+ }
+ return objects
+}
+
+func check(t *testing.T, mb *meta.DB, bs *blobstor.BlobStor, objects []objectPair) {
+ for i := range objects {
+ var mPrm meta.StorageIDPrm
+ mPrm.SetAddress(objects[i].addr)
+
+ mRes, err := mb.StorageID(context.Background(), mPrm)
+ require.NoError(t, err)
+
+ var prm common.GetPrm
+ prm.Address = objects[i].addr
+ prm.StorageID = mRes.StorageID()
+
+ res, err := bs.Get(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, objects[i].obj, res.Object)
+ }
+}
+
+type dummyEpoch struct{}
+
+func (dummyEpoch) CurrentEpoch() uint64 {
+ return 0
+}
diff --git a/internal/metrics/blobovnicza.go b/pkg/metrics/blobovnicza.go
similarity index 67%
rename from internal/metrics/blobovnicza.go
rename to pkg/metrics/blobovnicza.go
index 948272c88..a1ecbc700 100644
--- a/internal/metrics/blobovnicza.go
+++ b/pkg/metrics/blobovnicza.go
@@ -4,13 +4,12 @@ import (
"strconv"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
"github.com/prometheus/client_golang/prometheus"
)
type BlobobvnizcaMetrics interface {
- SetBlobobvnizcaTreeMode(shardID, path string, mode mode.ComponentMode)
+ SetBlobobvnizcaTreeMode(shardID, path string, readOnly bool)
CloseBlobobvnizcaTree(shardID, path string)
BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool)
AddBlobobvnizcaTreePut(shardID, path string, size int)
@@ -24,23 +23,16 @@ type BlobobvnizcaMetrics interface {
IncOpenBlobovniczaCount(shardID, path string)
DecOpenBlobovniczaCount(shardID, path string)
-
- BlobovniczaTreeRebuildStatus(shardID, path, status string)
- BlobovniczaTreeRebuildPercent(shardID, path string, value uint32)
- BlobovniczaTreeObjectMoved(shardID, path string, d time.Duration)
}
type blobovnicza struct {
- treeMode *shardIDPathModeValue
- treeReqDuration *prometheus.HistogramVec
- treePut *prometheus.CounterVec
- treeGet *prometheus.CounterVec
- treeOpenSize *prometheus.GaugeVec
- treeOpenItems *prometheus.GaugeVec
- treeOpenCounter *prometheus.GaugeVec
- treeObjectMoveDuration *prometheus.HistogramVec
- treeRebuildStatus *shardIDPathModeValue
- treeRebuildPercent *prometheus.GaugeVec
+ treeMode *shardIDPathModeValue
+ treeReqDuration *prometheus.HistogramVec
+ treePut *prometheus.CounterVec
+ treeGet *prometheus.CounterVec
+ treeOpenSize *prometheus.GaugeVec
+ treeOpenItems *prometheus.GaugeVec
+ treeOpenCounter *prometheus.GaugeVec
}
func newBlobovnicza() *blobovnicza {
@@ -83,24 +75,11 @@ func newBlobovnicza() *blobovnicza {
Name: "open_blobovnicza_count",
Help: "Count of opened blobovniczas of Blobovnicza tree",
}, []string{shardIDLabel, pathLabel}),
- treeObjectMoveDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: namespace,
- Subsystem: blobovniczaTreeSubSystem,
- Name: "object_move_duration_seconds",
- Help: "Accumulated Blobovnicza tree object move duration",
- }, []string{shardIDLabel, pathLabel}),
- treeRebuildStatus: newShardIDPathMode(blobovniczaTreeSubSystem, "rebuild_status", "Blobovnicza tree rebuild status"),
- treeRebuildPercent: metrics.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: blobovniczaTreeSubSystem,
- Name: "rebuild_complete_percent",
- Help: "Percent of rebuild completeness",
- }, []string{shardIDLabel, pathLabel}),
}
}
-func (b *blobovnicza) SetBlobobvnizcaTreeMode(shardID, path string, mod mode.ComponentMode) {
- b.treeMode.SetMode(shardID, path, mod.String())
+func (b *blobovnicza) SetBlobobvnizcaTreeMode(shardID, path string, readOnly bool) {
+ b.treeMode.SetMode(shardID, path, modeFromBool(readOnly))
}
func (b *blobovnicza) CloseBlobobvnizcaTree(shardID, path string) {
@@ -117,15 +96,6 @@ func (b *blobovnicza) CloseBlobobvnizcaTree(shardID, path string) {
shardIDLabel: shardID,
pathLabel: path,
})
- b.treeObjectMoveDuration.DeletePartialMatch(prometheus.Labels{
- shardIDLabel: shardID,
- pathLabel: path,
- })
- b.treeRebuildPercent.DeletePartialMatch(prometheus.Labels{
- shardIDLabel: shardID,
- pathLabel: path,
- })
- b.treeRebuildStatus.SetMode(shardID, path, undefinedStatus)
}
func (b *blobovnicza) BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) {
@@ -193,21 +163,3 @@ func (b *blobovnicza) SubOpenBlobovniczaItems(shardID, path string, items uint64
pathLabel: path,
}).Sub(float64(items))
}
-
-func (b *blobovnicza) BlobovniczaTreeRebuildStatus(shardID, path, status string) {
- b.treeRebuildStatus.SetMode(shardID, path, status)
-}
-
-func (b *blobovnicza) BlobovniczaTreeObjectMoved(shardID, path string, d time.Duration) {
- b.treeObjectMoveDuration.With(prometheus.Labels{
- shardIDLabel: shardID,
- pathLabel: path,
- }).Observe(d.Seconds())
-}
-
-func (b *blobovnicza) BlobovniczaTreeRebuildPercent(shardID, path string, value uint32) {
- b.treeRebuildPercent.With(prometheus.Labels{
- shardIDLabel: shardID,
- pathLabel: path,
- }).Set(float64(value))
-}
diff --git a/internal/metrics/blobstore.go b/pkg/metrics/blobstore.go
similarity index 100%
rename from internal/metrics/blobstore.go
rename to pkg/metrics/blobstore.go
diff --git a/internal/metrics/consts.go b/pkg/metrics/consts.go
similarity index 81%
rename from internal/metrics/consts.go
rename to pkg/metrics/consts.go
index 9123541ff..550eec424 100644
--- a/internal/metrics/consts.go
+++ b/pkg/metrics/consts.go
@@ -20,10 +20,6 @@ const (
treeServiceSubsystem = "treeservice"
writeCacheSubsystem = "writecache"
grpcServerSubsystem = "grpc_server"
- policerSubsystem = "policer"
- commonCacheSubsystem = "common_cache"
- multinetSubsystem = "multinet"
- qosSubsystem = "qos"
successLabel = "success"
shardIDLabel = "shard_id"
@@ -41,10 +37,6 @@ const (
storageLabel = "storage"
operationLabel = "operation"
endpointLabel = "endpoint"
- hitLabel = "hit"
- cacheLabel = "cache"
- sourceIPLabel = "source_ip"
- ioTagLabel = "io_tag"
readWriteMode = "READ_WRITE"
readOnlyMode = "READ_ONLY"
@@ -52,5 +44,4 @@ const (
failedToDeleteStatus = "failed_to_delete"
deletedStatus = "deleted"
- undefinedStatus = "undefined"
)
diff --git a/pkg/metrics/engine.go b/pkg/metrics/engine.go
new file mode 100644
index 000000000..23d799e28
--- /dev/null
+++ b/pkg/metrics/engine.go
@@ -0,0 +1,122 @@
+package metrics
+
+import (
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type EngineMetrics interface {
+ AddMethodDuration(method string, d time.Duration)
+ AddToContainerSize(cnrID string, size int64)
+ IncErrorCounter(shardID string)
+ ClearErrorCounter(shardID string)
+ DeleteShardMetrics(shardID string)
+ AddToObjectCounter(shardID, objectType string, delta int)
+ SetObjectCounter(shardID, objectType string, v uint64)
+ AddToPayloadCounter(shardID string, size int64)
+ SetMode(shardID string, mode mode.Mode)
+
+ WriteCache() WriteCacheMetrics
+ GC() GCMetrics
+}
+
+type engineMetrics struct {
+ methodDuration *prometheus.HistogramVec
+ objectCounter *prometheus.GaugeVec
+ containerSize *prometheus.GaugeVec
+ payloadSize *prometheus.GaugeVec
+ errorCounter *prometheus.GaugeVec
+ mode *shardIDModeValue
+
+ gc *gcMetrics
+ writeCache *writeCacheMetrics
+}
+
+func newEngineMetrics() *engineMetrics {
+ return &engineMetrics{
+ containerSize: newEngineGaugeVector("container_size_bytes", "Accumulated size of all objects in a container", []string{containerIDLabelKey}),
+ payloadSize: newEngineGaugeVector("payload_size_bytes", "Accumulated size of all objects in a shard", []string{shardIDLabel}),
+ errorCounter: newEngineGaugeVector("errors_total", "Shard's error counter", []string{shardIDLabel}),
+ methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: engineSubsystem,
+ Name: "request_duration_seconds",
+ Help: "Duration of Engine requests",
+ }, []string{methodLabel}),
+ objectCounter: newEngineGaugeVector("objects_total", "Objects counters per shards", []string{shardIDLabel, typeLabel}),
+ gc: newGCMetrics(),
+ writeCache: newWriteCacheMetrics(),
+ mode: newShardIDMode(engineSubsystem, "mode_info", "Shard mode"),
+ }
+}
+
+func newEngineGaugeVector(name, help string, labels []string) *prometheus.GaugeVec {
+ return metrics.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: engineSubsystem,
+ Name: name,
+ Help: help,
+ }, labels)
+}
+
+func (m *engineMetrics) AddMethodDuration(method string, d time.Duration) {
+ m.methodDuration.With(prometheus.Labels{
+ methodLabel: method,
+ }).Observe(d.Seconds())
+}
+
+func (m *engineMetrics) AddToContainerSize(cnrID string, size int64) {
+ m.containerSize.With(prometheus.Labels{containerIDLabelKey: cnrID}).Add(float64(size))
+}
+
+func (m *engineMetrics) AddToPayloadCounter(shardID string, size int64) {
+ m.payloadSize.With(prometheus.Labels{shardIDLabel: shardID}).Add(float64(size))
+}
+
+func (m *engineMetrics) IncErrorCounter(shardID string) {
+ m.errorCounter.With(prometheus.Labels{shardIDLabel: shardID}).Inc()
+}
+
+func (m *engineMetrics) ClearErrorCounter(shardID string) {
+ m.errorCounter.With(prometheus.Labels{shardIDLabel: shardID}).Set(0)
+}
+
+func (m *engineMetrics) DeleteShardMetrics(shardID string) {
+ m.errorCounter.Delete(prometheus.Labels{shardIDLabel: shardID})
+ m.payloadSize.Delete(prometheus.Labels{shardIDLabel: shardID})
+ m.objectCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+ m.mode.Delete(shardID)
+}
+
+func (m *engineMetrics) AddToObjectCounter(shardID, objectType string, delta int) {
+ m.objectCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ typeLabel: objectType,
+ },
+ ).Add(float64(delta))
+}
+
+func (m *engineMetrics) SetObjectCounter(shardID, objectType string, v uint64) {
+ m.objectCounter.With(
+ prometheus.Labels{
+ shardIDLabel: shardID,
+ typeLabel: objectType,
+ },
+ ).Set(float64(v))
+}
+
+func (m *engineMetrics) SetMode(shardID string, mode mode.Mode) {
+ m.mode.SetMode(shardID, mode.String())
+}
+
+func (m *engineMetrics) WriteCache() WriteCacheMetrics {
+ return m.writeCache
+}
+
+func (m *engineMetrics) GC() GCMetrics {
+ return m.gc
+}
diff --git a/internal/metrics/fstree.go b/pkg/metrics/fstree.go
similarity index 90%
rename from internal/metrics/fstree.go
rename to pkg/metrics/fstree.go
index ecd4352bb..4d4f0693b 100644
--- a/internal/metrics/fstree.go
+++ b/pkg/metrics/fstree.go
@@ -4,13 +4,12 @@ import (
"strconv"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
"github.com/prometheus/client_golang/prometheus"
)
type FSTreeMetrics interface {
- SetMode(shardID, path string, mode mode.ComponentMode)
+ SetMode(shardID, path string, readOnly bool)
Close(shardID, path string)
MethodDuration(shardID, path string, method string, d time.Duration, success bool)
@@ -49,8 +48,8 @@ func newFSTreeMetrics() *fstreeMetrics {
}
}
-func (m *fstreeMetrics) SetMode(shardID, path string, mod mode.ComponentMode) {
- m.mode.SetMode(shardID, path, mod.String())
+func (m *fstreeMetrics) SetMode(shardID, path string, readOnly bool) {
+ m.mode.SetMode(shardID, path, modeFromBool(readOnly))
}
func (m *fstreeMetrics) Close(shardID, path string) {
diff --git a/internal/metrics/gc.go b/pkg/metrics/gc.go
similarity index 100%
rename from internal/metrics/gc.go
rename to pkg/metrics/gc.go
diff --git a/internal/metrics/grpc.go b/pkg/metrics/grpc.go
similarity index 100%
rename from internal/metrics/grpc.go
rename to pkg/metrics/grpc.go
diff --git a/internal/metrics/innerring.go b/pkg/metrics/innerring.go
similarity index 77%
rename from internal/metrics/innerring.go
rename to pkg/metrics/innerring.go
index d0cb8131f..d93b3c432 100644
--- a/internal/metrics/innerring.go
+++ b/pkg/metrics/innerring.go
@@ -4,8 +4,6 @@ import (
"strconv"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/misc"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
"github.com/prometheus/client_golang/prometheus"
)
@@ -16,10 +14,6 @@ type InnerRingServiceMetrics struct {
health prometheus.Gauge
eventDuration *prometheus.HistogramVec
morphCacheMetrics *morphCacheMetrics
- logMetrics logger.LogMetrics
- multinet *multinetMetrics
- // nolint: unused
- appInfo *ApplicationInfo
}
// NewInnerRingMetrics returns new instance of metrics collectors for inner ring.
@@ -50,9 +44,6 @@ func NewInnerRingMetrics() *InnerRingServiceMetrics {
health: health,
eventDuration: eventDuration,
morphCacheMetrics: newMorphCacheMetrics(innerRingNamespace),
- appInfo: NewApplicationInfo(misc.Version),
- logMetrics: logger.NewLogMetrics(innerRingNamespace),
- multinet: newMultinetMetrics(innerRingNamespace),
}
}
@@ -76,11 +67,3 @@ func (m *InnerRingServiceMetrics) AddEvent(d time.Duration, typ string, success
func (m *InnerRingServiceMetrics) MorphCacheMetrics() MorphCacheMetrics {
return m.morphCacheMetrics
}
-
-func (m *InnerRingServiceMetrics) LogMetrics() logger.LogMetrics {
- return m.logMetrics
-}
-
-func (m *InnerRingServiceMetrics) Multinet() MultinetMetrics {
- return m.multinet
-}
diff --git a/internal/metrics/metabase.go b/pkg/metrics/metabase.go
similarity index 100%
rename from internal/metrics/metabase.go
rename to pkg/metrics/metabase.go
diff --git a/internal/metrics/mode.go b/pkg/metrics/mode.go
similarity index 86%
rename from internal/metrics/mode.go
rename to pkg/metrics/mode.go
index a9ac47acd..312a6b33d 100644
--- a/internal/metrics/mode.go
+++ b/pkg/metrics/mode.go
@@ -21,7 +21,7 @@ func newShardIDMode(subsystem, name, help string) *shardIDModeValue {
}
}
-func (m *shardIDModeValue) SetMode(shardID, mode string) {
+func (m *shardIDModeValue) SetMode(shardID string, mode string) {
m.modeValue.DeletePartialMatch(prometheus.Labels{
shardIDLabel: shardID,
})
@@ -54,7 +54,7 @@ func newShardIDPathMode(subsystem, name, help string) *shardIDPathModeValue {
}
}
-func (m *shardIDPathModeValue) SetMode(shardID, path, mode string) {
+func (m *shardIDPathModeValue) SetMode(shardID, path string, mode string) {
m.modeValue.DeletePartialMatch(prometheus.Labels{
shardIDLabel: shardID,
pathLabel: path,
@@ -74,12 +74,6 @@ func (m *shardIDPathModeValue) Delete(shardID, path string) {
})
}
-func (m *shardIDPathModeValue) DeleteByShardID(shardID string) {
- m.modeValue.DeletePartialMatch(prometheus.Labels{
- shardIDLabel: shardID,
- })
-}
-
func modeFromBool(readOnly bool) string {
modeValue := readWriteMode
if readOnly {
diff --git a/internal/metrics/morph.go b/pkg/metrics/morph.go
similarity index 93%
rename from internal/metrics/morph.go
rename to pkg/metrics/morph.go
index 02d7517bc..5215c674b 100644
--- a/internal/metrics/morph.go
+++ b/pkg/metrics/morph.go
@@ -4,6 +4,7 @@ import (
"strconv"
"time"
+ morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
"github.com/prometheus/client_golang/prometheus"
)
@@ -15,7 +16,7 @@ type morphClientMetrics struct {
invokeDuration *prometheus.HistogramVec
}
-func newMorphClientMetrics() *morphClientMetrics {
+func NewMorphClientMetrics() morphmetrics.Register {
return &morphClientMetrics{
switchCount: metrics.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
diff --git a/internal/metrics/morphcache.go b/pkg/metrics/morphcache.go
similarity index 91%
rename from internal/metrics/morphcache.go
rename to pkg/metrics/morphcache.go
index 388cb11e8..a4dbbccfc 100644
--- a/internal/metrics/morphcache.go
+++ b/pkg/metrics/morphcache.go
@@ -18,6 +18,10 @@ type morphCacheMetrics struct {
var _ MorphCacheMetrics = (*morphCacheMetrics)(nil)
+func NewNodeMorphCacheMetrics() MorphCacheMetrics {
+ return newMorphCacheMetrics(namespace)
+}
+
func newMorphCacheMetrics(ns string) *morphCacheMetrics {
return &morphCacheMetrics{
methodDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
diff --git a/internal/metrics/node.go b/pkg/metrics/node.go
similarity index 64%
rename from internal/metrics/node.go
rename to pkg/metrics/node.go
index 8ade19eb2..ad6864b8f 100644
--- a/internal/metrics/node.go
+++ b/pkg/metrics/node.go
@@ -1,9 +1,6 @@
package metrics
import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/misc"
- morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
"github.com/prometheus/client_golang/prometheus"
)
@@ -21,14 +18,6 @@ type NodeMetrics struct {
metabase *metabaseMetrics
pilorama *piloramaMetrics
grpc *grpcServerMetrics
- policer *policerMetrics
- morphClient *morphClientMetrics
- morphCache *morphCacheMetrics
- log logger.LogMetrics
- multinet *multinetMetrics
- qos *QoSMetrics
- // nolint: unused
- appInfo *ApplicationInfo
}
func NewNodeMetrics() *NodeMetrics {
@@ -50,13 +39,6 @@ func NewNodeMetrics() *NodeMetrics {
metabase: newMetabaseMetrics(),
pilorama: newPiloramaMetrics(),
grpc: newGrpcServerMetrics(),
- policer: newPolicerMetrics(),
- morphClient: newMorphClientMetrics(),
- morphCache: newMorphCacheMetrics(namespace),
- log: logger.NewLogMetrics(namespace),
- appInfo: NewApplicationInfo(misc.Version),
- multinet: newMultinetMetrics(namespace),
- qos: newQoSMetrics(),
}
}
@@ -108,27 +90,3 @@ func (m *NodeMetrics) PiloramaMetrics() PiloramaMetrics {
func (m *NodeMetrics) GrpcServerMetrics() GrpcServerMetrics {
return m.grpc
}
-
-func (m *NodeMetrics) PolicerMetrics() PolicerMetrics {
- return m.policer
-}
-
-func (m *NodeMetrics) MorphClientMetrics() morphmetrics.Register {
- return m.morphClient
-}
-
-func (m *NodeMetrics) MorphCacheMetrics() MorphCacheMetrics {
- return m.morphCache
-}
-
-func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
- return m.log
-}
-
-func (m *NodeMetrics) MultinetMetrics() MultinetMetrics {
- return m.multinet
-}
-
-func (m *NodeMetrics) QoSMetrics() *QoSMetrics {
- return m.qos
-}
diff --git a/internal/metrics/object.go b/pkg/metrics/object.go
similarity index 71%
rename from internal/metrics/object.go
rename to pkg/metrics/object.go
index e4f6dfde1..0ba994ed3 100644
--- a/internal/metrics/object.go
+++ b/pkg/metrics/object.go
@@ -9,14 +9,13 @@ import (
)
type ObjectServiceMetrics interface {
- AddRequestDuration(method string, d time.Duration, success bool, ioTag string)
+ AddRequestDuration(method string, d time.Duration, success bool)
AddPayloadSize(method string, size int)
}
type objectServiceMetrics struct {
- methodDuration *prometheus.HistogramVec
- payloadCounter *prometheus.CounterVec
- ioTagOpsCounter *prometheus.CounterVec
+ methodDuration *prometheus.HistogramVec
+ payloadCounter *prometheus.CounterVec
}
func newObjectServiceMetrics() *objectServiceMetrics {
@@ -33,24 +32,14 @@ func newObjectServiceMetrics() *objectServiceMetrics {
Name: "request_payload_bytes",
Help: "Object Service request payload",
}, []string{methodLabel}),
- ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "requests_total",
- Help: "Count of requests for each IO tag",
- }, []string{methodLabel, ioTagLabel}),
}
}
-func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) {
+func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) {
m.methodDuration.With(prometheus.Labels{
methodLabel: method,
successLabel: strconv.FormatBool(success),
}).Observe(d.Seconds())
- m.ioTagOpsCounter.With(prometheus.Labels{
- ioTagLabel: ioTag,
- methodLabel: method,
- }).Inc()
}
func (m *objectServiceMetrics) AddPayloadSize(method string, size int) {
diff --git a/internal/metrics/pilorama.go b/pkg/metrics/pilorama.go
similarity index 91%
rename from internal/metrics/pilorama.go
rename to pkg/metrics/pilorama.go
index c669275fe..41672a4b5 100644
--- a/internal/metrics/pilorama.go
+++ b/pkg/metrics/pilorama.go
@@ -10,7 +10,7 @@ import (
)
type PiloramaMetrics interface {
- SetMode(shardID string, m mode.ComponentMode)
+ SetMode(shardID string, m mode.Mode)
Close(shardID string)
AddMethodDuration(shardID string, method string, d time.Duration, success bool)
@@ -33,7 +33,7 @@ type piloramaMetrics struct {
reqDuration *prometheus.HistogramVec
}
-func (m *piloramaMetrics) SetMode(shardID string, mode mode.ComponentMode) {
+func (m *piloramaMetrics) SetMode(shardID string, mode mode.Mode) {
m.mode.SetMode(shardID, mode.String())
}
diff --git a/internal/metrics/replicator.go b/pkg/metrics/replicator.go
similarity index 99%
rename from internal/metrics/replicator.go
rename to pkg/metrics/replicator.go
index ca72a3031..a1519ac95 100644
--- a/internal/metrics/replicator.go
+++ b/pkg/metrics/replicator.go
@@ -5,6 +5,8 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
+//TODO
+
type ReplicatorMetrics interface {
IncInFlightRequest()
DecInFlightRequest()
diff --git a/internal/metrics/state.go b/pkg/metrics/state.go
similarity index 100%
rename from internal/metrics/state.go
rename to pkg/metrics/state.go
diff --git a/internal/metrics/treeservice.go b/pkg/metrics/treeservice.go
similarity index 81%
rename from internal/metrics/treeservice.go
rename to pkg/metrics/treeservice.go
index e192c4398..6702aa83c 100644
--- a/internal/metrics/treeservice.go
+++ b/pkg/metrics/treeservice.go
@@ -12,14 +12,12 @@ type TreeMetricsRegister interface {
AddReplicateTaskDuration(time.Duration, bool)
AddReplicateWaitDuration(time.Duration, bool)
AddSyncDuration(time.Duration, bool)
- AddOperation(string, string)
}
type treeServiceMetrics struct {
replicateTaskDuration *prometheus.HistogramVec
replicateWaitDuration *prometheus.HistogramVec
syncOpDuration *prometheus.HistogramVec
- ioTagOpsCounter *prometheus.CounterVec
}
var _ TreeMetricsRegister = (*treeServiceMetrics)(nil)
@@ -44,12 +42,6 @@ func newTreeServiceMetrics() *treeServiceMetrics {
Name: "sync_duration_seconds",
Help: "Duration of synchronization operations",
}, []string{successLabel}),
- ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: treeServiceSubsystem,
- Name: "requests_total",
- Help: "Count of requests for each IO tag",
- }, []string{methodLabel, ioTagLabel}),
}
}
@@ -70,10 +62,3 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) {
successLabel: strconv.FormatBool(success),
}).Observe(d.Seconds())
}
-
-func (m *treeServiceMetrics) AddOperation(op string, ioTag string) {
- m.ioTagOpsCounter.With(prometheus.Labels{
- ioTagLabel: ioTag,
- methodLabel: op,
- }).Inc()
-}
diff --git a/internal/metrics/types.go b/pkg/metrics/types.go
similarity index 100%
rename from internal/metrics/types.go
rename to pkg/metrics/types.go
diff --git a/internal/metrics/writecache.go b/pkg/metrics/writecache.go
similarity index 62%
rename from internal/metrics/writecache.go
rename to pkg/metrics/writecache.go
index 1b708f710..7e6083a49 100644
--- a/internal/metrics/writecache.go
+++ b/pkg/metrics/writecache.go
@@ -1,7 +1,7 @@
package metrics
import (
- "strconv"
+ "fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
@@ -9,12 +9,18 @@ import (
)
type WriteCacheMetrics interface {
- AddMethodDuration(shardID, path, storageType, method string, success bool, d time.Duration)
- SetActualCount(shardID, path, storageType string, count uint64)
- SetEstimateSize(shardID, path, storageType string, size uint64)
- SetMode(shardID, mode string)
- IncOperationCounter(shardID, path, storageType, operation string, success NullBool)
- Close(shardID, path string)
+ AddMethodDuration(shardID string, method string, success bool, d time.Duration, storageType string)
+
+ IncActualCount(shardID string, storageType string)
+ DecActualCount(shardID string, storageType string)
+ SetActualCount(shardID string, count uint64, storageType string)
+
+ SetEstimateSize(shardID string, size uint64, storageType string)
+ SetMode(shardID string, mode string)
+
+ IncOperationCounter(shardID string, operation string, success NullBool, storageType string)
+
+ Close(shardID string)
}
type writeCacheMetrics struct {
@@ -35,44 +41,55 @@ func newWriteCacheMetrics() *writeCacheMetrics {
Subsystem: writeCacheSubsystem,
Name: "request_duration_seconds",
Help: "Writecache request process duration",
- }, []string{shardIDLabel, successLabel, storageLabel, methodLabel, pathLabel}),
+ }, []string{shardIDLabel, successLabel, storageLabel, methodLabel}),
operationCounter: metrics.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: writeCacheSubsystem,
Name: "operations_total",
Help: "The number of writecache operations processed",
- }, []string{shardIDLabel, storageLabel, successLabel, operationLabel, pathLabel}),
- actualCount: newWCGaugeVec("actual_objects_total", "Actual objects count in writecache", []string{shardIDLabel, storageLabel, pathLabel}),
- estimatedSize: newWCGaugeVec("estimated_size_bytes", "Estimated writecache size", []string{shardIDLabel, storageLabel, pathLabel}),
+ }, []string{shardIDLabel, storageLabel, successLabel, operationLabel}),
+ actualCount: newWCGaugeVec("actual_objects_total", "Actual objects count in writecache", []string{shardIDLabel, storageLabel}),
+ estimatedSize: newWCGaugeVec("estimated_size_bytes", "Estimated writecache size", []string{shardIDLabel, storageLabel}),
mode: newShardIDMode(writeCacheSubsystem, "mode_info", "Writecache mode value"),
}
}
-func (m *writeCacheMetrics) AddMethodDuration(shardID, path, storageType, method string, success bool, d time.Duration) {
+func (m *writeCacheMetrics) AddMethodDuration(shardID string, method string, success bool, d time.Duration, storageType string) {
m.methodDuration.With(
prometheus.Labels{
shardIDLabel: shardID,
- successLabel: strconv.FormatBool(success),
+ successLabel: fmt.Sprintf("%v", success),
storageLabel: storageType,
methodLabel: method,
- pathLabel: path,
},
).Observe(d.Seconds())
}
-func (m *writeCacheMetrics) SetActualCount(shardID, path, storageType string, count uint64) {
+func (m *writeCacheMetrics) IncActualCount(shardID string, storageType string) {
+ m.actualCount.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ storageLabel: storageType,
+ }).Inc()
+}
+
+func (m *writeCacheMetrics) DecActualCount(shardID string, storageType string) {
+ m.actualCount.With(prometheus.Labels{
+ shardIDLabel: shardID,
+ storageLabel: storageType,
+ }).Dec()
+}
+
+func (m *writeCacheMetrics) SetActualCount(shardID string, count uint64, storageType string) {
m.actualCount.With(prometheus.Labels{
shardIDLabel: shardID,
storageLabel: storageType,
- pathLabel: path,
}).Set(float64(count))
}
-func (m *writeCacheMetrics) SetEstimateSize(shardID, path, storageType string, size uint64) {
+func (m *writeCacheMetrics) SetEstimateSize(shardID string, size uint64, storageType string) {
m.estimatedSize.With(prometheus.Labels{
shardIDLabel: shardID,
storageLabel: storageType,
- pathLabel: path,
}).Set(float64(size))
}
@@ -80,22 +97,21 @@ func (m *writeCacheMetrics) SetMode(shardID string, mode string) {
m.mode.SetMode(shardID, mode)
}
-func (m *writeCacheMetrics) IncOperationCounter(shardID, path, storageType, operation string, success NullBool) {
+func (m *writeCacheMetrics) IncOperationCounter(shardID string, operation string, success NullBool, storageType string) {
m.operationCounter.With(prometheus.Labels{
shardIDLabel: shardID,
storageLabel: storageType,
operationLabel: operation,
successLabel: success.String(),
- pathLabel: path,
}).Inc()
}
-func (m *writeCacheMetrics) Close(shardID, path string) {
+func (m *writeCacheMetrics) Close(shardID string) {
m.mode.Delete(shardID)
- m.methodDuration.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path})
- m.operationCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path})
- m.actualCount.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path})
- m.estimatedSize.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID, pathLabel: path})
+ m.methodDuration.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+ m.operationCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+ m.actualCount.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
+ m.estimatedSize.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
}
func newWCGaugeVec(name, help string, labels []string) *prometheus.GaugeVec {
diff --git a/pkg/morph/client/actor.go b/pkg/morph/client/actor.go
deleted file mode 100644
index 2849f3052..000000000
--- a/pkg/morph/client/actor.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package client
-
-import (
- "github.com/google/uuid"
- "github.com/nspcc-dev/neo-go/pkg/config/netmode"
- "github.com/nspcc-dev/neo-go/pkg/core/transaction"
- "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-type actorProvider interface {
- GetActor() *actor.Actor
- GetRPCActor() actor.RPCActor
-}
-
-// SwitchRPCGuardedActor switches an established connection with neo-go if it is broken.
-// This leads to an invalidation of an rpc actor within Client. That means the
-// components that are initilized with the rpc actor may unintentionally use
-// it when it is already invalidated. SwitchRPCGuardedActor is used to prevent
-// this situation, getting the rpc actor from Client.
-type SwitchRPCGuardedActor struct {
- actorProvider actorProvider
-}
-
-func NewSwitchRPCGuardedActor(c *Client) *SwitchRPCGuardedActor {
- return &SwitchRPCGuardedActor{
- actorProvider: c,
- }
-}
-
-func (a *SwitchRPCGuardedActor) Call(contract util.Uint160, operation string, params ...any) (*result.Invoke, error) {
- return a.actorProvider.GetActor().Call(contract, operation, params...)
-}
-
-func (a *SwitchRPCGuardedActor) CalculateNetworkFee(tx *transaction.Transaction) (int64, error) {
- return a.actorProvider.GetActor().CalculateNetworkFee(tx)
-}
-
-func (a *SwitchRPCGuardedActor) CalculateValidUntilBlock() (uint32, error) {
- return a.actorProvider.GetActor().CalculateValidUntilBlock()
-}
-
-func (a *SwitchRPCGuardedActor) GetBlockCount() (uint32, error) {
- return a.actorProvider.GetActor().GetBlockCount()
-}
-
-func (a *SwitchRPCGuardedActor) GetNetwork() netmode.Magic {
- return a.actorProvider.GetActor().GetNetwork()
-}
-
-func (a *SwitchRPCGuardedActor) GetVersion() result.Version {
- return a.actorProvider.GetActor().GetVersion()
-}
-
-func (a *SwitchRPCGuardedActor) MakeCall(contract util.Uint160, method string, params ...any) (*transaction.Transaction, error) {
- return a.actorProvider.GetActor().MakeCall(contract, method, params...)
-}
-
-func (a *SwitchRPCGuardedActor) MakeRun(script []byte) (*transaction.Transaction, error) {
- return a.actorProvider.GetActor().MakeRun(script)
-}
-
-func (a *SwitchRPCGuardedActor) MakeTunedCall(contract util.Uint160, method string, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier, params ...any) (*transaction.Transaction, error) {
- return a.actorProvider.GetActor().MakeTunedCall(contract, method, attrs, txHook, params...)
-}
-
-func (a *SwitchRPCGuardedActor) MakeTunedRun(script []byte, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier) (*transaction.Transaction, error) {
- return a.actorProvider.GetActor().MakeTunedRun(script, attrs, txHook)
-}
-
-func (a *SwitchRPCGuardedActor) MakeUncheckedRun(script []byte, sysfee int64, attrs []transaction.Attribute, txHook actor.TransactionModifier) (*transaction.Transaction, error) {
- return a.actorProvider.GetActor().MakeUncheckedRun(script, sysfee, attrs, txHook)
-}
-
-func (a *SwitchRPCGuardedActor) MakeUnsignedCall(contract util.Uint160, method string, attrs []transaction.Attribute, params ...any) (*transaction.Transaction, error) {
- return a.actorProvider.GetActor().MakeUnsignedCall(contract, method, attrs, params...)
-}
-
-func (a *SwitchRPCGuardedActor) MakeUnsignedRun(script []byte, attrs []transaction.Attribute) (*transaction.Transaction, error) {
- return a.actorProvider.GetActor().MakeUnsignedRun(script, attrs)
-}
-
-func (a *SwitchRPCGuardedActor) MakeUnsignedUncheckedRun(script []byte, sysFee int64, attrs []transaction.Attribute) (*transaction.Transaction, error) {
- return a.actorProvider.GetActor().MakeUnsignedUncheckedRun(script, sysFee, attrs)
-}
-
-func (a *SwitchRPCGuardedActor) Send(tx *transaction.Transaction) (util.Uint256, uint32, error) {
- return a.actorProvider.GetActor().Send(tx)
-}
-
-func (a *SwitchRPCGuardedActor) SendCall(contract util.Uint160, method string, params ...any) (util.Uint256, uint32, error) {
- return a.actorProvider.GetActor().SendCall(contract, method, params...)
-}
-
-func (a *SwitchRPCGuardedActor) SendRun(script []byte) (util.Uint256, uint32, error) {
- return a.actorProvider.GetActor().SendRun(script)
-}
-
-func (a *SwitchRPCGuardedActor) SendTunedCall(contract util.Uint160, method string, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier, params ...any) (util.Uint256, uint32, error) {
- return a.actorProvider.GetActor().SendTunedCall(contract, method, attrs, txHook, params...)
-}
-
-func (a *SwitchRPCGuardedActor) SendTunedRun(script []byte, attrs []transaction.Attribute, txHook actor.TransactionCheckerModifier) (util.Uint256, uint32, error) {
- return a.actorProvider.GetActor().SendTunedRun(script, attrs, txHook)
-}
-
-func (a *SwitchRPCGuardedActor) SendUncheckedRun(script []byte, sysfee int64, attrs []transaction.Attribute, txHook actor.TransactionModifier) (util.Uint256, uint32, error) {
- return a.actorProvider.GetActor().SendUncheckedRun(script, sysfee, attrs, txHook)
-}
-
-func (a *SwitchRPCGuardedActor) Sender() util.Uint160 {
- return a.actorProvider.GetActor().Sender()
-}
-
-func (a *SwitchRPCGuardedActor) Sign(tx *transaction.Transaction) error {
- return a.actorProvider.GetActor().Sign(tx)
-}
-
-func (a *SwitchRPCGuardedActor) SignAndSend(tx *transaction.Transaction) (util.Uint256, uint32, error) {
- return a.actorProvider.GetActor().SignAndSend(tx)
-}
-
-func (a *SwitchRPCGuardedActor) CallAndExpandIterator(contract util.Uint160, method string, maxItems int, params ...any) (*result.Invoke, error) {
- return a.actorProvider.GetActor().CallAndExpandIterator(contract, method, maxItems, params...)
-}
-
-func (a *SwitchRPCGuardedActor) TerminateSession(sessionID uuid.UUID) error {
- return a.actorProvider.GetActor().TerminateSession(sessionID)
-}
-
-func (a *SwitchRPCGuardedActor) TraverseIterator(sessionID uuid.UUID, iterator *result.Iterator, num int) ([]stackitem.Item, error) {
- return a.actorProvider.GetActor().TraverseIterator(sessionID, iterator, num)
-}
-
-func (a *SwitchRPCGuardedActor) GetRPCActor() actor.RPCActor {
- return a.actorProvider.GetRPCActor()
-}
-
-func (a *SwitchRPCGuardedActor) GetRPCInvoker() invoker.RPCInvoke {
- return a.actorProvider.GetRPCActor()
-}
diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go
index 4462daab4..aae245acd 100644
--- a/pkg/morph/client/balance/balanceOf.go
+++ b/pkg/morph/client/balance/balanceOf.go
@@ -1,33 +1,36 @@
package balance
import (
- "context"
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// BalanceOf receives the amount of funds in the client's account
// through the Balance contract call, and returns it.
-func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) {
- h := id.ScriptHash()
+func (c *Client) BalanceOf(id user.ID) (*big.Int, error) {
+ h, err := address.StringToUint160(id.EncodeToString())
+ if err != nil {
+ return nil, err
+ }
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(balanceOfMethod)
invokePrm.SetArgs(h)
- prms, err := c.client.TestInvoke(ctx, invokePrm)
+ prms, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", balanceOfMethod, err)
} else if ln := len(prms); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", balanceOfMethod, ln)
}
amount, err := client.BigIntFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err)
+ return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err)
}
return amount, nil
}
diff --git a/pkg/morph/client/balance/burn.go b/pkg/morph/client/balance/burn.go
index f4685b0ab..88a1aa8f1 100644
--- a/pkg/morph/client/balance/burn.go
+++ b/pkg/morph/client/balance/burn.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -32,12 +30,11 @@ func (b *BurnPrm) SetID(id []byte) {
}
// Burn destroys funds from the account.
-func (c *Client) Burn(ctx context.Context, p BurnPrm) error {
+func (c *Client) Burn(p BurnPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(burnMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
- return err
+ return c.client.Invoke(prm)
}
diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go
index 1dacb9574..b05c526dc 100644
--- a/pkg/morph/client/balance/client.go
+++ b/pkg/morph/client/balance/client.go
@@ -39,7 +39,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
staticClient, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("create 'balance' contract client: %w", err)
+ return nil, fmt.Errorf("could not create static client of Balance contract: %w", err)
}
return &Client{
@@ -54,7 +54,15 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return &opts{client.TryNotary()}
+ return new(opts)
+}
+
+// TryNotary returns option to enable
+// notary invocation tries.
+func TryNotary() Option {
+ return func(o *opts) {
+ *o = append(*o, client.TryNotary())
+ }
}
// AsAlphabet returns option to sign main TX
diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go
index 57e61d62b..39e4b28e5 100644
--- a/pkg/morph/client/balance/decimals.go
+++ b/pkg/morph/client/balance/decimals.go
@@ -1,7 +1,6 @@
package balance
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -9,20 +8,20 @@ import (
// Decimals decimal precision of currency transactions
// through the Balance contract call, and returns it.
-func (c *Client) Decimals(ctx context.Context) (uint32, error) {
+func (c *Client) Decimals() (uint32, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(decimalsMethod)
- prms, err := c.client.TestInvoke(ctx, invokePrm)
+ prms, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("could not perform test invocation (%s): %w", decimalsMethod, err)
} else if ln := len(prms); ln != 1 {
return 0, fmt.Errorf("unexpected stack item count (%s): %d", decimalsMethod, ln)
}
decimals, err := client.IntFromStackItem(prms[0])
if err != nil {
- return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err)
+ return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err)
}
return uint32(decimals), nil
}
diff --git a/pkg/morph/client/balance/lock.go b/pkg/morph/client/balance/lock.go
index 83e8b0586..7b270808e 100644
--- a/pkg/morph/client/balance/lock.go
+++ b/pkg/morph/client/balance/lock.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -44,12 +42,11 @@ func (l *LockPrm) SetDueEpoch(dueEpoch int64) {
}
// Lock locks fund on the user account.
-func (c *Client) Lock(ctx context.Context, p LockPrm) error {
+func (c *Client) Lock(p LockPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(lockMethod)
prm.SetArgs(p.id, p.user, p.lock, p.amount, p.dueEpoch)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
- return err
+ return c.client.Invoke(prm)
}
diff --git a/pkg/morph/client/balance/mint.go b/pkg/morph/client/balance/mint.go
index 082ade85e..cda78c077 100644
--- a/pkg/morph/client/balance/mint.go
+++ b/pkg/morph/client/balance/mint.go
@@ -1,8 +1,6 @@
package balance
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -32,12 +30,11 @@ func (m *MintPrm) SetID(id []byte) {
}
// Mint sends funds to the account.
-func (c *Client) Mint(ctx context.Context, p MintPrm) error {
+func (c *Client) Mint(p MintPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(mintMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
- return err
+ return c.client.Invoke(prm)
}
diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go
index 870bed166..5206f69fa 100644
--- a/pkg/morph/client/balance/transfer.go
+++ b/pkg/morph/client/balance/transfer.go
@@ -1,11 +1,11 @@
package balance
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/encoding/address"
)
// TransferPrm groups parameters of TransferX method.
@@ -21,18 +21,27 @@ type TransferPrm struct {
// TransferX transfers p.Amount of GASe-12 from p.From to p.To
// with details p.Details through direct smart contract call.
-func (c *Client) TransferX(ctx context.Context, p TransferPrm) error {
- from := p.From.ScriptHash()
- to := p.To.ScriptHash()
+//
+// If TryNotary is provided, calls notary contract.
+func (c *Client) TransferX(p TransferPrm) error {
+ from, err := address.StringToUint160(p.From.EncodeToString())
+ if err != nil {
+ return err
+ }
+
+ to, err := address.StringToUint160(p.To.EncodeToString())
+ if err != nil {
+ return err
+ }
prm := client.InvokePrm{}
prm.SetMethod(transferXMethod)
prm.SetArgs(from, to, p.Amount, p.Details)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ err = c.client.Invoke(prm)
if err != nil {
- return fmt.Errorf("invoke method (%s): %w", transferXMethod, err)
+ return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err)
}
return nil
}
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
index aab058d27..606f3bd66 100644
--- a/pkg/morph/client/client.go
+++ b/pkg/morph/client/client.go
@@ -9,26 +9,21 @@ import (
"sync/atomic"
"time"
- nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/google/uuid"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
- "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/rolemgmt"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -61,9 +56,6 @@ type Client struct {
rpcActor *actor.Actor // neo-go RPC actor
gasToken *nep17.Token // neo-go GAS token wrapper
rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper
- nnsHash util.Uint160 // NNS contract hash
-
- nnsReader *nnsClient.ContractReader // NNS contract wrapper
acc *wallet.Account // neo account
accAddr util.Uint160 // account's address
@@ -81,8 +73,6 @@ type Client struct {
// channel for internal stop
closeChan chan struct{}
- closed atomic.Bool
- wg sync.WaitGroup
// indicates that Client is not able to
// establish connection to any of the
@@ -98,12 +88,27 @@ type Client struct {
type cache struct {
m sync.RWMutex
+ nnsHash *util.Uint160
gKey *keys.PublicKey
txHeights *lru.Cache[util.Uint256, uint32]
metrics metrics.MorphCacheMetrics
}
+func (c *cache) nns() *util.Uint160 {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ return c.nnsHash
+}
+
+func (c *cache) setNNSHash(nnsHash util.Uint160) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.nnsHash = &nnsHash
+}
+
func (c *cache) groupKey() *keys.PublicKey {
c.m.RLock()
defer c.m.RUnlock()
@@ -122,6 +127,7 @@ func (c *cache) invalidate() {
c.m.Lock()
defer c.m.Unlock()
+ c.nnsHash = nil
c.gKey = nil
c.txHeights.Purge()
}
@@ -151,10 +157,23 @@ func (e *notHaltStateError) Error() string {
)
}
+// implementation of error interface for FrostFS-specific errors.
+type frostfsError struct {
+ err error
+}
+
+func (e frostfsError) Error() string {
+ return fmt.Sprintf("frostfs error: %v", e.err)
+}
+
+// wraps FrostFS-specific error into frostfsError. Arg must not be nil.
+func wrapFrostFSError(err error) error {
+ return frostfsError{err}
+}
+
// Invoke invokes contract method by sending transaction into blockchain.
-// Returns valid until block value.
// Supported args types: int64, string, util.Uint160, []byte and bool.
-func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (InvokeRes, error) {
+func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) error {
start := time.Now()
success := false
defer func() {
@@ -165,39 +184,33 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F
defer c.switchLock.RUnlock()
if c.inactive {
- return InvokeRes{}, ErrConnectionLost
+ return ErrConnectionLost
}
txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...)
if err != nil {
- return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err)
+ return fmt.Errorf("could not invoke %s: %w", method, err)
}
- c.logger.Debug(ctx, logs.ClientNeoClientInvoke,
+ c.logger.Debug(logs.ClientNeoClientInvoke,
zap.String("method", method),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
success = true
- return InvokeRes{Hash: txHash, VUB: vub}, nil
+ return nil
}
// TestInvokeIterator invokes contract method returning an iterator and executes cb on each element.
// If cb returns an error, the session is closed and this error is returned as-is.
-// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
-// batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
-// The default batchSize is 100, the default limit from neo-go.
-func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error {
+// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
+func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, contract util.Uint160, method string, args ...interface{}) error {
start := time.Now()
success := false
defer func() {
c.metrics.ObserveInvoke("TestInvokeIterator", contract.String(), method, success, time.Since(start))
}()
- if batchSize <= 0 {
- batchSize = invoker.DefaultIteratorResultItems
- }
-
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -205,55 +218,34 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
return ErrConnectionLost
}
- script, err := smartcontract.CreateCallAndPrefetchIteratorScript(contract, method, batchSize, args...)
- if err != nil {
- return err
- }
-
- val, err := c.rpcActor.Run(script)
+ val, err := c.rpcActor.Call(contract, method, args...)
if err != nil {
return err
} else if val.State != HaltState {
- return ¬HaltStateError{state: val.State, exception: val.FaultException}
+ return wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException})
}
- arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err)
+ sid, r, err := unwrap.SessionIterator(val, err)
if err != nil {
return err
}
- for i := range arr {
- if err := cb(arr[i]); err != nil {
- return err
- }
- }
- if (sid == uuid.UUID{}) {
- success = true
- return nil
- }
defer func() {
_ = c.rpcActor.TerminateSession(sid)
}()
- // Batch size for TraverseIterator() can restricted on the server-side.
- traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems)
- for {
- items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize)
- if err != nil {
- return err
- }
-
+ items, err := c.rpcActor.TraverseIterator(sid, &r, 0)
+ for err == nil && len(items) != 0 {
for i := range items {
- if err := cb(items[i]); err != nil {
+ if err = cb(items[i]); err != nil {
return err
}
}
- if len(items) < traverseBatchSize {
- break
- }
+ items, err = c.rpcActor.TraverseIterator(sid, &r, 0)
}
- success = true
- return nil
+
+ success = err == nil
+ return err
}
// TestInvoke invokes contract method locally in neo-go node. This method should
@@ -278,7 +270,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) (
}
if val.State != HaltState {
- return nil, ¬HaltStateError{state: val.State, exception: val.FaultException}
+ return nil, wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException})
}
success = true
@@ -299,7 +291,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
return err
}
- c.logger.Debug(context.Background(), logs.ClientNativeGasTransferInvoke,
+ c.logger.Debug(logs.ClientNativeGasTransferInvoke,
zap.String("to", receiver.StringLE()),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -333,7 +325,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8
return err
}
- c.logger.Debug(context.Background(), logs.ClientBatchGasTransferInvoke,
+ c.logger.Debug(logs.ClientBatchGasTransferInvoke,
zap.Strings("to", receiversLog),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -360,8 +352,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight,
- zap.Error(err))
+ c.logger.Error(logs.ClientCantGetBlockchainHeight,
+ zap.String("error", err.Error()))
return nil
}
@@ -374,8 +366,8 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
newHeight, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243,
- zap.Error(err))
+ c.logger.Error(logs.ClientCantGetBlockchainHeight243,
+ zap.String("error", err.Error()))
return nil
}
@@ -433,28 +425,6 @@ func (c *Client) TxHalt(h util.Uint256) (res bool, err error) {
return len(aer.Executions) > 0 && aer.Executions[0].VMState.HasFlag(vmstate.Halt), nil
}
-func (c *Client) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
- c.switchLock.RLock()
- defer c.switchLock.RUnlock()
-
- if c.inactive {
- return nil, ErrConnectionLost
- }
-
- return c.client.GetApplicationLog(hash, trig)
-}
-
-func (c *Client) GetVersion() (*result.Version, error) {
- c.switchLock.RLock()
- defer c.switchLock.RUnlock()
-
- if c.inactive {
- return nil, ErrConnectionLost
- }
-
- return c.client.GetVersion()
-}
-
// TxHeight returns true if transaction has been successfully executed and persisted.
func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
c.switchLock.RLock()
@@ -470,7 +440,7 @@ func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) {
// NeoFSAlphabetList returns keys that stored in NeoFS Alphabet role. Main chain
// stores alphabet node keys of inner ring there, however the sidechain stores both
// alphabet and non alphabet node keys of inner ring.
-func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) {
+func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -480,7 +450,7 @@ func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err
list, err := c.roleList(noderoles.NeoFSAlphabet)
if err != nil {
- return nil, fmt.Errorf("get alphabet nodes role list: %w", err)
+ return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err)
}
return list, nil
@@ -494,7 +464,7 @@ func (c *Client) GetDesignateHash() util.Uint160 {
func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) {
height, err := c.rpcActor.GetBlockCount()
if err != nil {
- return nil, fmt.Errorf("get chain height: %w", err)
+ return nil, fmt.Errorf("can't get chain height: %w", err)
}
return c.rolemgmt.GetDesignatedByRole(r, height)
@@ -557,6 +527,15 @@ func (c *Client) IsValidScript(script []byte, signers []transaction.Signer) (val
return res.State == vmstate.Halt.String(), nil
}
+// NotificationChannel returns a channel that receives subscribed
+// notifications from the connected RPC node.
+// Channel is closed when connection to the RPC node is lost.
+func (c *Client) NotificationChannel() <-chan rpcclient.Notification {
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+ return c.client.Notifications //lint:ignore SA1019 waits for neo-go v0.102.0 https://github.com/nspcc-dev/neo-go/pull/2980
+}
+
func (c *Client) Metrics() morphmetrics.Register {
return c.metrics
}
@@ -565,19 +544,4 @@ func (c *Client) setActor(act *actor.Actor) {
c.rpcActor = act
c.gasToken = nep17.New(act, gas.Hash)
c.rolemgmt = rolemgmt.New(act)
- c.nnsReader = nnsClient.NewReader(act, c.nnsHash)
-}
-
-func (c *Client) GetActor() *actor.Actor {
- c.switchLock.RLock()
- defer c.switchLock.RUnlock()
-
- return c.rpcActor
-}
-
-func (c *Client) GetRPCActor() actor.RPCActor {
- c.switchLock.RLock()
- defer c.switchLock.RUnlock()
-
- return c.client
}
diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go
index e4dcd0db7..e7e1bbca9 100644
--- a/pkg/morph/client/constructor.go
+++ b/pkg/morph/client/constructor.go
@@ -4,11 +4,10 @@ import (
"context"
"errors"
"fmt"
- "net"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru/v2"
@@ -42,13 +41,13 @@ type cfg struct {
endpoints []Endpoint
+ singleCli *rpcclient.WSClient // neo-go client for single client mode
+
inactiveModeCb Callback
switchInterval time.Duration
morphCacheMetrics metrics.MorphCacheMetrics
-
- dialerSource DialerSource
}
const (
@@ -56,19 +55,20 @@ const (
defaultWaitInterval = 500 * time.Millisecond
)
-var ErrNoHealthyEndpoint = errors.New("no healthy endpoint")
+var (
+ ErrNoHealthyEndpoint = errors.New("no healthy endpoint")
+)
func defaultConfig() *cfg {
return &cfg{
dialTimeout: defaultDialTimeout,
- logger: logger.NewLoggerWrapper(zap.L()),
+ logger: &logger.Logger{Logger: zap.L()},
metrics: morphmetrics.NoopRegister{},
waitInterval: defaultWaitInterval,
signer: &transaction.Signer{
Scopes: transaction.Global,
},
morphCacheMetrics: &morphmetrics.NoopMorphCacheMetrics{},
- dialerSource: &noopDialerSource{},
}
}
@@ -126,30 +126,37 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
var err error
var act *actor.Actor
- var endpoint Endpoint
- for cli.endpoints.curr, endpoint = range cli.endpoints.list {
- cli.client, act, err = cli.newCli(ctx, endpoint)
+ if cfg.singleCli != nil {
+ // return client in single RPC node mode that uses
+ // predefined WS client
+ //
+ // in case of the closing web socket connection:
+ // if extra endpoints were provided via options,
+ // they will be used in switch process, otherwise
+ // inactive mode will be enabled
+ cli.client = cfg.singleCli
+
+ act, err = newActor(cfg.singleCli, acc, *cfg)
if err != nil {
- cli.logger.Warn(ctx, logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
- zap.Error(err), zap.String("endpoint", endpoint.Address))
- } else {
- cli.logger.Info(ctx, logs.FrostFSIRCreatedRPCClientForEndpoint,
- zap.String("endpoint", endpoint.Address))
- if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 {
- cli.switchIsActive.Store(true)
- go cli.switchToMostPrioritized(ctx)
+ return nil, fmt.Errorf("could not create RPC actor: %w", err)
+ }
+ } else {
+ var endpoint Endpoint
+ for cli.endpoints.curr, endpoint = range cli.endpoints.list {
+ cli.client, act, err = cli.newCli(ctx, endpoint.Address)
+ if err != nil {
+ cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
+ zap.Error(err), zap.String("endpoint", endpoint.Address))
+ } else {
+ cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint,
+ zap.String("endpoint", endpoint.Address))
+ break
}
- break
+ }
+ if cli.client == nil {
+ return nil, ErrNoHealthyEndpoint
}
}
- if cli.client == nil {
- return nil, ErrNoHealthyEndpoint
- }
- cs, err := cli.client.GetContractStateByID(nnsContractID)
- if err != nil {
- return nil, fmt.Errorf("resolve nns hash: %w", err)
- }
- cli.nnsHash = cs.Hash
cli.setActor(act)
go cli.closeWaiter(ctx)
@@ -157,16 +164,10 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
return cli, nil
}
-func (c *Client) newCli(ctx context.Context, endpoint Endpoint) (*rpcclient.WSClient, *actor.Actor, error) {
- cfg, err := endpoint.MTLSConfig.parse()
- if err != nil {
- return nil, nil, fmt.Errorf("read mtls certificates: %w", err)
- }
- cli, err := rpcclient.NewWS(ctx, endpoint.Address, rpcclient.WSOptions{
+func (c *Client) newCli(ctx context.Context, endpoint string) (*rpcclient.WSClient, *actor.Actor, error) {
+ cli, err := rpcclient.NewWS(ctx, endpoint, rpcclient.WSOptions{
Options: rpcclient.Options{
- DialTimeout: c.cfg.dialTimeout,
- TLSClientConfig: cfg,
- NetDialContext: c.cfg.dialerSource.NetContextDialer(),
+ DialTimeout: c.cfg.dialTimeout,
},
})
if err != nil {
@@ -277,6 +278,17 @@ func WithEndpoints(endpoints ...Endpoint) Option {
}
}
+// WithSingleClient returns a client constructor option
+// that specifies single neo-go client and forces Client
+// to use it for requests.
+//
+// Passed client must already be initialized.
+func WithSingleClient(cli *rpcclient.WSClient) Option {
+ return func(c *cfg) {
+ c.singleCli = cli
+ }
+}
+
// WithConnLostCallback return a client constructor option
// that specifies a callback that is called when Client
// unsuccessfully tried to connect to all the specified
@@ -301,19 +313,3 @@ func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option {
c.morphCacheMetrics = morphCacheMetrics
}
}
-
-type DialerSource interface {
- NetContextDialer() func(context.Context, string, string) (net.Conn, error)
-}
-
-type noopDialerSource struct{}
-
-func (ds *noopDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
- return nil
-}
-
-func WithDialerSource(ds DialerSource) Option {
- return func(c *cfg) {
- c.dialerSource = ds
- }
-}
diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index be684619b..c8702b1c7 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -27,13 +27,24 @@ const (
getMethod = "get"
listMethod = "list"
containersOfMethod = "containersOf"
+ eaclMethod = "eACL"
+ setEACLMethod = "setEACL"
deletionInfoMethod = "deletionInfo"
+ startEstimationMethod = "startContainerEstimation"
+ stopEstimationMethod = "stopContainerEstimation"
+
+ putSizeMethod = "putContainerSize"
+ listSizesMethod = "listContainerSizes"
+ getSizeMethod = "getContainerSize"
+
// putNamedMethod is method name for container put with an alias. It is exported to provide custom fee.
putNamedMethod = "putNamed"
)
-var errNilArgument = errors.New("empty argument")
+var (
+ errNilArgument = errors.New("empty argument")
+)
// NewFromMorph returns the wrapper instance from the raw morph client.
//
@@ -46,9 +57,9 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
opts[i](o)
}
- sc, err := client.NewStatic(cli, contract, fee, *o...)
+ sc, err := client.NewStatic(cli, contract, fee, o.staticOpts...)
if err != nil {
- return nil, fmt.Errorf("create 'container' contract client: %w", err)
+ return nil, fmt.Errorf("can't create container static client: %w", err)
}
return &Client{client: sc}, nil
@@ -68,10 +79,20 @@ func (c Client) ContractAddress() util.Uint160 {
// parameter of Wrapper.
type Option func(*opts)
-type opts []client.StaticClientOption
+type opts struct {
+ staticOpts []client.StaticClientOption
+}
func defaultOpts() *opts {
- return &opts{client.TryNotary()}
+ return new(opts)
+}
+
+// TryNotary returns option to enable
+// notary invocation tries.
+func TryNotary() Option {
+ return func(o *opts) {
+ o.staticOpts = append(o.staticOpts, client.TryNotary())
+ }
}
// AsAlphabet returns option to sign main TX
@@ -81,6 +102,6 @@ func defaultOpts() *opts {
// Considered to be used by IR nodes only.
func AsAlphabet() Option {
return func(o *opts) {
- *o = append(*o, client.AsAlphabet())
+ o.staticOpts = append(o.staticOpts, client.AsAlphabet())
}
}
diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go
index 60fb8ad7c..8a3c7220f 100644
--- a/pkg/morph/client/container/containers_of.go
+++ b/pkg/morph/client/container/containers_of.go
@@ -1,9 +1,10 @@
package container
import (
- "context"
"errors"
+ "fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
@@ -14,54 +15,39 @@ import (
// to the specified user of FrostFS system. If idUser is nil, returns the list of all containers.
//
// If remote RPC does not support neo-go session API, fallback to List() method.
-func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) {
- var cidList []cid.ID
- var err error
-
- cb := func(id cid.ID) error {
- cidList = append(cidList, id)
- return nil
- }
- if err = c.IterateContainersOf(ctx, idUser, cb); err != nil {
- return nil, err
- }
- return cidList, nil
-}
-
-// iterateContainers iterates over a list of container identifiers
-// belonging to the specified user of FrostFS system and executes
-// `cb` on each element. If idUser is nil, calls it on the list of all containers.
-func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error {
+func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) {
var rawID []byte
+
if idUser != nil {
rawID = idUser.WalletBytes()
}
- itemCb := func(item stackitem.Item) error {
- id, err := getCIDfromStackItem(item)
+ var cidList []cid.ID
+ cb := func(item stackitem.Item) error {
+ rawID, err := client.BytesFromStackItem(item)
if err != nil {
- return err
+ return fmt.Errorf("could not get byte array from stack item (%s): %w", containersOfMethod, err)
}
- if err = cb(id); err != nil {
- return err
+
+ var id cid.ID
+
+ err = id.Decode(rawID)
+ if err != nil {
+ return fmt.Errorf("decode container ID: %w", err)
}
+
+ cidList = append(cidList, id)
return nil
}
- // We would like to have batch size as big as possible,
- // to reduce the number of round-trips and avoid creating sessions.
- // The limit depends on 2 things:
- // 1. VM limits: max 2048 items on stack.
- // 2. JSON encoded size for the item with type = 128k.
- // It turns out, that for container ID the second limit is hit first,
- // 512 is big enough value and it is beautiful.
- const batchSize = 512
-
cnrHash := c.client.ContractAddress()
- err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID)
- if err != nil && errors.Is(err, unwrap.ErrNoSessionID) {
- return c.iterate(ctx, idUser, cb)
+ err := c.client.Morph().TestInvokeIterator(cb, cnrHash, containersOfMethod, rawID)
+ if err != nil {
+ if errors.Is(err, unwrap.ErrNoSessionID) {
+ return c.List(idUser)
+ }
+ return nil, err
}
- return err
+ return cidList, nil
}
diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go
index 09912efa5..5bc8fc188 100644
--- a/pkg/morph/client/container/delete.go
+++ b/pkg/morph/client/container/delete.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/sha256"
"fmt"
@@ -13,7 +12,7 @@ import (
// along with signature and session token.
//
// Returns error if container ID is nil.
-func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error {
+func Delete(c *Client, witness core.RemovalWitness) error {
binCnr := make([]byte, sha256.Size)
witness.ContainerID.Encode(binCnr)
@@ -27,8 +26,7 @@ func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error {
prm.SetToken(tok.Marshal())
}
- _, err := c.Delete(ctx, prm)
- return err
+ return c.Delete(prm)
}
// DeletePrm groups parameters of Delete client operation.
@@ -64,11 +62,13 @@ func (d *DeletePrm) SetKey(key []byte) {
// Delete removes the container from FrostFS system
// through Container contract call.
//
-// Returns valid until block and any error encountered that caused
+// Returns any error encountered that caused
// the removal to interrupt.
-func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
- if len(p.signature) == 0 && !p.IsControl() {
- return 0, errNilArgument
+//
+// If TryNotary is provided, calls notary contract.
+func (c *Client) Delete(p DeletePrm) error {
+ if len(p.signature) == 0 {
+ return errNilArgument
}
prm := client.InvokePrm{}
@@ -76,9 +76,9 @@ func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
prm.SetArgs(p.cnr, p.signature, p.key, p.token)
prm.InvokePrmOptional = p.InvokePrmOptional
- res, err := c.client.Invoke(ctx, prm)
+ err := c.client.Invoke(prm)
if err != nil {
- return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err)
+ return fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err)
}
- return res.VUB, nil
+ return nil
}
diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go
index 90bcdd7d5..40eb267d6 100644
--- a/pkg/morph/client/container/deletion_info.go
+++ b/pkg/morph/client/container/deletion_info.go
@@ -1,7 +1,6 @@
package container
import (
- "context"
"crypto/sha256"
"fmt"
"strings"
@@ -11,66 +10,59 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/mr-tron/base58"
)
-func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) {
- return DeletionInfo(ctx, (*Client)(x), cnr)
+func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) {
+ return DeletionInfo((*Client)(x), cnr)
}
type deletionInfo interface {
- DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error)
+ DeletionInfo(cid []byte) (*containercore.DelInfo, error)
}
-func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
+func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.DeletionInfo(ctx, binCnr)
+ return c.DeletionInfo(binCnr)
}
-func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) {
+func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(deletionInfoMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("test invoke (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", deletionInfoMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("get item array of container (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not get item array of container (%s): %w", deletionInfoMethod, err)
}
if len(arr) != 2 {
return nil, fmt.Errorf("unexpected container stack item count (%s): %d", deletionInfoMethod, len(arr))
}
- rawOwner, err := client.BytesFromStackItem(arr[0])
+ owner, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("get byte array of container (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container (%s): %w", deletionInfoMethod, err)
}
- var owner user.ID
- if err := owner.DecodeString(base58.Encode(rawOwner)); err != nil {
- return nil, fmt.Errorf("decode container owner id (%s): %w", deletionInfoMethod, err)
- }
-
- epoch, err := client.BigIntFromStackItem(arr[1])
+ epoch, err := client.IntFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("get byte array of container signature (%s): %w", deletionInfoMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", deletionInfoMethod, err)
}
return &containercore.DelInfo{
Owner: owner,
- Epoch: epoch.Uint64(),
+ Epoch: int(epoch),
}, nil
}
diff --git a/pkg/morph/client/container/eacl.go b/pkg/morph/client/container/eacl.go
new file mode 100644
index 000000000..8e9455050
--- /dev/null
+++ b/pkg/morph/client/container/eacl.go
@@ -0,0 +1,95 @@
+package container
+
+import (
+ "crypto/sha256"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+)
+
+// GetEACL reads the extended ACL table from FrostFS system
+// through Container contract call.
+//
+// Returns apistatus.EACLNotFound if eACL table is missing in the contract.
+func (c *Client) GetEACL(cnr cid.ID) (*container.EACL, error) {
+ binCnr := make([]byte, sha256.Size)
+ cnr.Encode(binCnr)
+
+ prm := client.TestInvokePrm{}
+ prm.SetMethod(eaclMethod)
+ prm.SetArgs(binCnr)
+
+ prms, err := c.client.TestInvoke(prm)
+ if err != nil {
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", eaclMethod, err)
+ } else if ln := len(prms); ln != 1 {
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", eaclMethod, ln)
+ }
+
+ arr, err := client.ArrayFromStackItem(prms[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get item array of eACL (%s): %w", eaclMethod, err)
+ }
+
+ if len(arr) != 4 {
+ return nil, fmt.Errorf("unexpected eacl stack item count (%s): %d", eaclMethod, len(arr))
+ }
+
+ rawEACL, err := client.BytesFromStackItem(arr[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL (%s): %w", eaclMethod, err)
+ }
+
+ sig, err := client.BytesFromStackItem(arr[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL signature (%s): %w", eaclMethod, err)
+ }
+
+ // Client may not return errors if the table is missing, so check this case additionally.
+ // The absence of a signature in the response can be taken as an eACL absence criterion,
+ // since unsigned table cannot be approved in the storage by design.
+ if len(sig) == 0 {
+ return nil, new(apistatus.EACLNotFound)
+ }
+
+ pub, err := client.BytesFromStackItem(arr[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL public key (%s): %w", eaclMethod, err)
+ }
+
+ binToken, err := client.BytesFromStackItem(arr[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get byte array of eACL session token (%s): %w", eaclMethod, err)
+ }
+
+ var res container.EACL
+
+ res.Value = eacl.NewTable()
+ if err = res.Value.Unmarshal(rawEACL); err != nil {
+ return nil, err
+ }
+
+ if len(binToken) > 0 {
+ res.Session = new(session.Container)
+
+ err = res.Session.Unmarshal(binToken)
+ if err != nil {
+ return nil, fmt.Errorf("could not unmarshal session token: %w", err)
+ }
+ }
+
+ // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion
+ var sigV2 refs.Signature
+ sigV2.SetKey(pub)
+ sigV2.SetSign(sig)
+ sigV2.SetScheme(refs.ECDSA_RFC6979_SHA256)
+
+ err = res.Signature.ReadFromV2(sigV2)
+ return &res, err
+}
diff --git a/pkg/morph/client/container/eacl_set.go b/pkg/morph/client/container/eacl_set.go
new file mode 100644
index 000000000..2d2ffb456
--- /dev/null
+++ b/pkg/morph/client/container/eacl_set.go
@@ -0,0 +1,93 @@
+package container
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+)
+
+// PutEACL marshals table, and passes it to Wrapper's PutEACL method
+// along with sig.Key() and sig.Sign().
+//
+// Returns error if table is nil.
+//
+// If TryNotary is provided, calls notary contract.
+func PutEACL(c *Client, eaclInfo containercore.EACL) error {
+ if eaclInfo.Value == nil {
+ return errNilArgument
+ }
+
+ data, err := eaclInfo.Value.Marshal()
+ if err != nil {
+ return fmt.Errorf("can't marshal eacl table: %w", err)
+ }
+
+ var prm PutEACLPrm
+ prm.SetTable(data)
+
+ if eaclInfo.Session != nil {
+ prm.SetToken(eaclInfo.Session.Marshal())
+ }
+
+ // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion
+ var sigV2 refs.Signature
+ eaclInfo.Signature.WriteToV2(&sigV2)
+
+ prm.SetKey(sigV2.GetKey())
+ prm.SetSignature(sigV2.GetSign())
+
+ return c.PutEACL(prm)
+}
+
+// PutEACLPrm groups parameters of PutEACL operation.
+type PutEACLPrm struct {
+ table []byte
+ key []byte
+ sig []byte
+ token []byte
+
+ client.InvokePrmOptional
+}
+
+// SetTable sets table.
+func (p *PutEACLPrm) SetTable(table []byte) {
+ p.table = table
+}
+
+// SetKey sets key.
+func (p *PutEACLPrm) SetKey(key []byte) {
+ p.key = key
+}
+
+// SetSignature sets signature.
+func (p *PutEACLPrm) SetSignature(sig []byte) {
+ p.sig = sig
+}
+
+// SetToken sets session token.
+func (p *PutEACLPrm) SetToken(token []byte) {
+ p.token = token
+}
+
+// PutEACL saves binary eACL table with its session token, key and signature
+// in FrostFS system through Container contract call.
+//
+// Returns any error encountered that caused the saving to interrupt.
+func (c *Client) PutEACL(p PutEACLPrm) error {
+ if len(p.sig) == 0 || len(p.key) == 0 {
+ return errNilArgument
+ }
+
+ prm := client.InvokePrm{}
+ prm.SetMethod(setEACLMethod)
+ prm.SetArgs(p.table, p.sig, p.key, p.token)
+ prm.InvokePrmOptional = p.InvokePrmOptional
+
+ err := c.client.Invoke(prm)
+ if err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", setEACLMethod, err)
+ }
+ return nil
+}
diff --git a/pkg/morph/client/container/estimations.go b/pkg/morph/client/container/estimations.go
new file mode 100644
index 000000000..6adf67476
--- /dev/null
+++ b/pkg/morph/client/container/estimations.go
@@ -0,0 +1,54 @@
+package container
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+)
+
+// StartEstimationPrm groups parameters of StartEstimation operation.
+type StartEstimationPrm struct {
+ commonEstimationPrm
+}
+
+// StopEstimationPrm groups parameters of StopEstimation operation.
+type StopEstimationPrm struct {
+ commonEstimationPrm
+}
+
+type commonEstimationPrm struct {
+ epoch uint64
+
+ client.InvokePrmOptional
+}
+
+// SetEpoch sets epoch.
+func (p *commonEstimationPrm) SetEpoch(epoch uint64) {
+ p.epoch = epoch
+}
+
+// StartEstimation votes to produce start estimation notification.
+func (c *Client) StartEstimation(p StartEstimationPrm) error {
+ prm := client.InvokePrm{}
+ prm.SetMethod(startEstimationMethod)
+ prm.SetArgs(p.epoch)
+ prm.InvokePrmOptional = p.InvokePrmOptional
+
+ if err := c.client.Invoke(prm); err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", startEstimationMethod, err)
+ }
+ return nil
+}
+
+// StopEstimation votes to produce stop estimation notification.
+func (c *Client) StopEstimation(p StopEstimationPrm) error {
+ prm := client.InvokePrm{}
+ prm.SetMethod(stopEstimationMethod)
+ prm.SetArgs(p.epoch)
+ prm.InvokePrmOptional = p.InvokePrmOptional
+
+ if err := c.client.Invoke(prm); err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", stopEstimationMethod, err)
+ }
+ return nil
+}
diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go
index 8622d2cdd..6715f870f 100644
--- a/pkg/morph/client/container/get.go
+++ b/pkg/morph/client/container/get.go
@@ -1,15 +1,14 @@
package container
import (
- "context"
"crypto/sha256"
"fmt"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@@ -17,8 +16,8 @@ import (
type containerSource Client
-func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) {
- return Get(ctx, (*Client)(x), cnr)
+func (x *containerSource) Get(cnr cid.ID) (*containercore.Container, error) {
+ return Get((*Client)(x), cnr)
}
// AsContainerSource provides container Source interface
@@ -28,15 +27,15 @@ func AsContainerSource(w *Client) containercore.Source {
}
type getContainer interface {
- Get(ctx context.Context, cid []byte) (*containercore.Container, error)
+ Get(cid []byte) (*containercore.Container, error)
}
// Get marshals container ID, and passes it to Wrapper's Get method.
-func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) {
+func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
- return c.Get(ctx, binCnr)
+ return c.Get(binCnr)
}
// Get reads the container from FrostFS system by binary identifier
@@ -44,24 +43,24 @@ func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Contai
//
// If an empty slice is returned for the requested identifier,
// storage.ErrNotFound error is returned.
-func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
+func (c *Client) Get(cid []byte) (*containercore.Container, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(getMethod)
prm.SetArgs(cid)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
- return nil, fmt.Errorf("test invoke (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", getMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
- return nil, fmt.Errorf("get item array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get item array of container (%s): %w", getMethod, err)
}
if len(arr) != 4 {
@@ -70,29 +69,29 @@ func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container,
cnrBytes, err := client.BytesFromStackItem(arr[0])
if err != nil {
- return nil, fmt.Errorf("get byte array of container (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container (%s): %w", getMethod, err)
}
sigBytes, err := client.BytesFromStackItem(arr[1])
if err != nil {
- return nil, fmt.Errorf("get byte array of container signature (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", getMethod, err)
}
pub, err := client.BytesFromStackItem(arr[2])
if err != nil {
- return nil, fmt.Errorf("get byte array of public key (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of public key (%s): %w", getMethod, err)
}
tokBytes, err := client.BytesFromStackItem(arr[3])
if err != nil {
- return nil, fmt.Errorf("get byte array of session token (%s): %w", getMethod, err)
+ return nil, fmt.Errorf("could not get byte array of session token (%s): %w", getMethod, err)
}
var cnr containercore.Container
if err := cnr.Value.Unmarshal(cnrBytes); err != nil {
// use other major version if there any
- return nil, fmt.Errorf("unmarshal container: %w", err)
+ return nil, fmt.Errorf("can't unmarshal container: %w", err)
}
if len(tokBytes) > 0 {
@@ -100,7 +99,7 @@ func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container,
err = cnr.Session.Unmarshal(tokBytes)
if err != nil {
- return nil, fmt.Errorf("unmarshal session token: %w", err)
+ return nil, fmt.Errorf("could not unmarshal session token: %w", err)
}
}
diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go
index fc63d1beb..8f165f4b1 100644
--- a/pkg/morph/client/container/list.go
+++ b/pkg/morph/client/container/list.go
@@ -1,22 +1,20 @@
package container
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)
-// iterate iterates through a list of container identifiers belonging
+// List returns a list of container identifiers belonging
// to the specified user of FrostFS system. The list is composed
// through Container contract call.
//
-// Iterates through the identifiers of all FrostFS containers if pointer
+// Returns the identifiers of all FrostFS containers if pointer
// to user identifier is nil.
-func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error {
+func (c *Client) List(idUser *user.ID) ([]cid.ID, error) {
var rawID []byte
if idUser != nil {
@@ -27,43 +25,34 @@ func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) e
prm.SetMethod(listMethod)
prm.SetArgs(rawID)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
- return fmt.Errorf("test invoke (%s): %w", listMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", listMethod, err)
} else if ln := len(res); ln != 1 {
- return fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln)
}
res, err = client.ArrayFromStackItem(res[0])
if err != nil {
- return fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err)
+ return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listMethod, err)
}
+ cidList := make([]cid.ID, 0, len(res))
for i := range res {
- id, err := getCIDfromStackItem(res[i])
+ rawID, err := client.BytesFromStackItem(res[i])
if err != nil {
- return err
+ return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listMethod, err)
}
- if err = cb(id); err != nil {
- return err
+ var id cid.ID
+
+ err = id.Decode(rawID)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
}
+
+ cidList = append(cidList, id)
}
- return nil
-}
-
-func getCIDfromStackItem(item stackitem.Item) (cid.ID, error) {
- rawID, err := client.BytesFromStackItem(item)
- if err != nil {
- return cid.ID{}, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err)
- }
-
- var id cid.ID
-
- err = id.Decode(rawID)
- if err != nil {
- return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
- }
- return id, nil
+ return cidList, nil
}
diff --git a/pkg/morph/client/container/load.go b/pkg/morph/client/container/load.go
new file mode 100644
index 000000000..dcf89f73e
--- /dev/null
+++ b/pkg/morph/client/container/load.go
@@ -0,0 +1,171 @@
+package container
+
+import (
+ "crypto/sha256"
+ "fmt"
+
+ v2refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+)
+
+// AnnounceLoadPrm groups parameters of AnnounceLoad operation.
+type AnnounceLoadPrm struct {
+ a container.SizeEstimation
+ key []byte
+
+ client.InvokePrmOptional
+}
+
+// SetAnnouncement sets announcement.
+func (a2 *AnnounceLoadPrm) SetAnnouncement(a container.SizeEstimation) {
+ a2.a = a
+}
+
+// SetReporter sets public key of the reporter.
+func (a2 *AnnounceLoadPrm) SetReporter(key []byte) {
+ a2.key = key
+}
+
+// AnnounceLoad saves container size estimation calculated by storage node
+// with key in FrostFS system through Container contract call.
+//
+// Returns any error encountered that caused the saving to interrupt.
+func (c *Client) AnnounceLoad(p AnnounceLoadPrm) error {
+ binCnr := make([]byte, sha256.Size)
+ p.a.Container().Encode(binCnr)
+
+ prm := client.InvokePrm{}
+ prm.SetMethod(putSizeMethod)
+ prm.SetArgs(p.a.Epoch(), binCnr, p.a.Value(), p.key)
+ prm.InvokePrmOptional = p.InvokePrmOptional
+
+ err := c.client.Invoke(prm)
+ if err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", putSizeMethod, err)
+ }
+ return nil
+}
+
+// EstimationID is an identity of container load estimation inside Container contract.
+type EstimationID []byte
+
+// ListLoadEstimationsByEpoch returns a list of container load estimations for the specified epoch.
+// The list is composed through Container contract call.
+func (c *Client) ListLoadEstimationsByEpoch(epoch uint64) ([]EstimationID, error) {
+ invokePrm := client.TestInvokePrm{}
+ invokePrm.SetMethod(listSizesMethod)
+ invokePrm.SetArgs(epoch)
+
+ prms, err := c.client.TestInvoke(invokePrm)
+ if err != nil {
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", listSizesMethod, err)
+ } else if ln := len(prms); ln != 1 {
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", listSizesMethod, ln)
+ }
+
+ prms, err = client.ArrayFromStackItem(prms[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listSizesMethod, err)
+ }
+
+ res := make([]EstimationID, 0, len(prms))
+ for i := range prms {
+ id, err := client.BytesFromStackItem(prms[i])
+ if err != nil {
+ return nil, fmt.Errorf("could not get ID byte array from stack item (%s): %w", listSizesMethod, err)
+ }
+
+ res = append(res, id)
+ }
+
+ return res, nil
+}
+
+// Estimation is a structure of single container load estimation
+// reported by storage node.
+type Estimation struct {
+ Size uint64
+
+ Reporter []byte
+}
+
+// Estimations is a structure of grouped container load estimation inside Container contract.
+type Estimations struct {
+ ContainerID cid.ID
+
+ Values []Estimation
+}
+
+// GetUsedSpaceEstimations returns a list of container load estimations by ID.
+// The list is composed through Container contract call.
+func (c *Client) GetUsedSpaceEstimations(id EstimationID) (*Estimations, error) {
+ prm := client.TestInvokePrm{}
+ prm.SetMethod(getSizeMethod)
+ prm.SetArgs([]byte(id))
+
+ prms, err := c.client.TestInvoke(prm)
+ if err != nil {
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", getSizeMethod, err)
+ } else if ln := len(prms); ln != 1 {
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", getSizeMethod, ln)
+ }
+
+ prms, err = client.ArrayFromStackItem(prms[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get stack items of estimation fields from stack item (%s): %w", getSizeMethod, err)
+ } else if ln := len(prms); ln != 2 {
+ return nil, fmt.Errorf("unexpected stack item count of estimations fields (%s)", getSizeMethod)
+ }
+
+ rawCnr, err := client.BytesFromStackItem(prms[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get container ID byte array from stack item (%s): %w", getSizeMethod, err)
+ }
+
+ prms, err = client.ArrayFromStackItem(prms[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get estimation list array from stack item (%s): %w", getSizeMethod, err)
+ }
+
+ var cnr cid.ID
+
+ err = cnr.Decode(rawCnr)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
+ }
+
+ v2 := new(v2refs.ContainerID)
+ v2.SetValue(rawCnr)
+ res := &Estimations{
+ ContainerID: cnr,
+ Values: make([]Estimation, 0, len(prms)),
+ }
+
+ for i := range prms {
+ arr, err := client.ArrayFromStackItem(prms[i])
+ if err != nil {
+ return nil, fmt.Errorf("could not get estimation struct from stack item (%s): %w", getSizeMethod, err)
+ } else if ln := len(arr); ln != 2 {
+ return nil, fmt.Errorf("unexpected stack item count of estimation fields (%s)", getSizeMethod)
+ }
+
+ reporter, err := client.BytesFromStackItem(arr[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get reporter byte array from stack item (%s): %w", getSizeMethod, err)
+ }
+
+ sz, err := client.IntFromStackItem(arr[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get estimation size from stack item (%s): %w", getSizeMethod, err)
+ }
+
+ res.Values = append(res.Values, Estimation{
+ Reporter: reporter,
+ Size: uint64(sz),
+ })
+ }
+
+ return res, nil
+}
diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go
index 3bb84eb87..5c23eb36d 100644
--- a/pkg/morph/client/container/put.go
+++ b/pkg/morph/client/container/put.go
@@ -1,12 +1,11 @@
package container
import (
- "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
@@ -15,7 +14,7 @@ import (
// along with sig.Key() and sig.Sign().
//
// Returns error if container is nil.
-func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID, error) {
+func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
data := cnr.Value.Marshal()
d := container.ReadDomain(cnr.Value)
@@ -36,7 +35,7 @@ func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID,
prm.SetKey(sigV2.GetKey())
prm.SetSignature(sigV2.GetSign())
- err := c.Put(ctx, prm)
+ err := c.Put(prm)
if err != nil {
return nil, err
}
@@ -94,7 +93,9 @@ func (p *PutPrm) SetZone(zone string) {
//
// Returns calculated container identifier and any error
// encountered that caused the saving to interrupt.
-func (c *Client) Put(ctx context.Context, p PutPrm) error {
+//
+// If TryNotary is provided, calls notary contract.
+func (c *Client) Put(p PutPrm) error {
if len(p.sig) == 0 || len(p.key) == 0 {
return errNilArgument
}
@@ -115,9 +116,9 @@ func (c *Client) Put(ctx context.Context, p PutPrm) error {
prm.SetMethod(method)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
+ err := c.client.Invoke(prm)
if err != nil {
- return fmt.Errorf("invoke method (%s): %w", method, err)
+ return fmt.Errorf("could not invoke method (%s): %w", method, err)
}
return nil
}
diff --git a/pkg/morph/client/frostfs/bind.go b/pkg/morph/client/frostfs/bind.go
new file mode 100644
index 000000000..5b15d5c7b
--- /dev/null
+++ b/pkg/morph/client/frostfs/bind.go
@@ -0,0 +1,71 @@
+package frostfscontract
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+)
+
+type commonBindArgs struct {
+ scriptHash []byte // script hash of account identifier
+
+ keys [][]byte // list of serialized public keys
+
+ client.InvokePrmOptional
+}
+
+// SetOptionalPrm sets optional client parameters.
+func (x *commonBindArgs) SetOptionalPrm(op client.InvokePrmOptional) {
+ x.InvokePrmOptional = op
+}
+
+// SetScriptHash sets script hash of the FrostFS account identifier.
+func (x *commonBindArgs) SetScriptHash(v []byte) {
+ x.scriptHash = v
+}
+
+// SetKeys sets a list of public keys in a binary format.
+func (x *commonBindArgs) SetKeys(v [][]byte) {
+ x.keys = v
+}
+
+// BindKeysPrm groups parameters of BindKeys operation.
+type BindKeysPrm struct {
+ commonBindArgs
+}
+
+// BindKeys binds a list of public keys to the FrostFS account identified by script hash.
+func (x *Client) BindKeys(p BindKeysPrm) error {
+ prm := client.InvokePrm{}
+ prm.SetMethod(bindKeysMethod)
+ prm.SetArgs(p.scriptHash, p.keys)
+ prm.InvokePrmOptional = p.InvokePrmOptional
+
+ err := x.client.Invoke(prm)
+ if err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", bindKeysMethod, err)
+ }
+
+ return nil
+}
+
+// UnbindKeysPrm groups parameters of UnbindKeys operation.
+type UnbindKeysPrm struct {
+ commonBindArgs
+}
+
+// UnbindKeys invokes the call of key unbinding method
+// of FrostFS contract.
+func (x *Client) UnbindKeys(args UnbindKeysPrm) error {
+ prm := client.InvokePrm{}
+ prm.SetMethod(unbindKeysMethod)
+ prm.SetArgs(args.scriptHash, args.keys)
+ prm.InvokePrmOptional = args.InvokePrmOptional
+
+ err := x.client.Invoke(prm)
+ if err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", unbindKeysMethod, err)
+ }
+
+ return nil
+}
diff --git a/pkg/morph/client/frostfs/cheque.go b/pkg/morph/client/frostfs/cheque.go
index d3eba7639..e8f4f7f18 100644
--- a/pkg/morph/client/frostfs/cheque.go
+++ b/pkg/morph/client/frostfs/cheque.go
@@ -1,8 +1,6 @@
package frostfscontract
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -39,14 +37,13 @@ func (c *ChequePrm) SetLock(lock util.Uint160) {
}
// Cheque invokes `cheque` method of FrostFS contract.
-func (x *Client) Cheque(ctx context.Context, p ChequePrm) error {
+func (x *Client) Cheque(p ChequePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(chequeMethod)
prm.SetArgs(p.id, p.user, p.amount, p.lock)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(ctx, prm)
- return err
+ return x.client.Invoke(prm)
}
// AlphabetUpdatePrm groups parameters of AlphabetUpdate operation.
@@ -68,12 +65,11 @@ func (a *AlphabetUpdatePrm) SetPubs(pubs keys.PublicKeys) {
}
// AlphabetUpdate update list of alphabet nodes.
-func (x *Client) AlphabetUpdate(ctx context.Context, p AlphabetUpdatePrm) error {
+func (x *Client) AlphabetUpdate(p AlphabetUpdatePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(alphabetUpdateMethod)
prm.SetArgs(p.id, p.pubs)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(ctx, prm)
- return err
+ return x.client.Invoke(prm)
}
diff --git a/pkg/morph/client/frostfs/client.go b/pkg/morph/client/frostfs/client.go
index cd6a9849e..3e3e70ec0 100644
--- a/pkg/morph/client/frostfs/client.go
+++ b/pkg/morph/client/frostfs/client.go
@@ -21,6 +21,8 @@ type Client struct {
}
const (
+ bindKeysMethod = "bind"
+ unbindKeysMethod = "unbind"
alphabetUpdateMethod = "alphabetUpdate"
chequeMethod = "cheque"
)
@@ -35,7 +37,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("create 'frostfs' contract client: %w", err)
+ return nil, fmt.Errorf("could not create client of FrostFS contract: %w", err)
}
return &Client{client: sc}, nil
diff --git a/pkg/morph/client/frostfsid/addrm_keys.go b/pkg/morph/client/frostfsid/addrm_keys.go
new file mode 100644
index 000000000..cbbd05142
--- /dev/null
+++ b/pkg/morph/client/frostfsid/addrm_keys.go
@@ -0,0 +1,61 @@
+package frostfsid
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+)
+
+type CommonBindPrm struct {
+ ownerID []byte // FrostFS account identifier
+
+ keys [][]byte // list of serialized public keys
+
+ client.InvokePrmOptional
+}
+
+func (x *CommonBindPrm) SetOptionalPrm(prm client.InvokePrmOptional) {
+ x.InvokePrmOptional = prm
+}
+
+// SetOwnerID sets FrostFS account identifier.
+func (x *CommonBindPrm) SetOwnerID(v []byte) {
+ x.ownerID = v
+}
+
+// SetKeys sets a list of public keys in a binary format.
+func (x *CommonBindPrm) SetKeys(v [][]byte) {
+ x.keys = v
+}
+
+// AddKeys adds a list of public keys to FrostFS account.
+func (x *Client) AddKeys(p CommonBindPrm) error {
+ prm := client.InvokePrm{}
+
+ prm.SetMethod(addKeysMethod)
+ prm.SetArgs(p.ownerID, p.keys)
+ prm.InvokePrmOptional = p.InvokePrmOptional
+
+ err := x.client.Invoke(prm)
+ if err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", addKeysMethod, err)
+ }
+
+ return nil
+}
+
+// RemoveKeys removes a list of public keys from FrostFS account.
+func (x *Client) RemoveKeys(args CommonBindPrm) error {
+ prm := client.InvokePrm{}
+
+ prm.SetMethod(removeKeysMethod)
+ prm.SetArgs(args.ownerID, args.keys)
+ prm.InvokePrmOptional = args.InvokePrmOptional
+
+ err := x.client.Invoke(prm)
+ if err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", removeKeysMethod, err)
+ }
+
+ return nil
+}
diff --git a/pkg/morph/client/frostfsid/client.go b/pkg/morph/client/frostfsid/client.go
index 61eb03f09..dded61aff 100644
--- a/pkg/morph/client/frostfsid/client.go
+++ b/pkg/morph/client/frostfsid/client.go
@@ -3,7 +3,6 @@ package frostfsid
import (
"fmt"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -21,14 +20,53 @@ type Client struct {
client *client.StaticClient // static FrostFS ID contract client
}
-var _ frostfsidcore.SubjectProvider = (*Client)(nil)
+const (
+ keyListingMethod = "key"
+ addKeysMethod = "addKey"
+ removeKeysMethod = "removeKey"
+)
// NewFromMorph wraps client to work with FrostFS ID contract.
-func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8) (*Client, error) {
- sc, err := client.NewStatic(cli, contract, fee, client.TryNotary(), client.AsAlphabet())
+func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...Option) (*Client, error) {
+ o := defaultOpts()
+
+ for i := range opts {
+ opts[i](o)
+ }
+
+ sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("create 'frostfsid' contract client: %w", err)
+ return nil, fmt.Errorf("could not create client of FrostFS ID contract: %w", err)
}
return &Client{client: sc}, nil
}
+
+// Option sets an optional
+// parameter of the Client.
+type Option func(*opts)
+
+type opts []client.StaticClientOption
+
+func defaultOpts() *opts {
+ return new(opts)
+}
+
+// TryNotary returns option to enable
+// notary invocation tries.
+func TryNotary() Option {
+ return func(o *opts) {
+ *o = append(*o, client.TryNotary())
+ }
+}
+
+// AsAlphabet returns option to sign main TX
+// of notary requests with client's private
+// key.
+//
+// Considered to be used by IR nodes only.
+func AsAlphabet() Option {
+ return func(o *opts) {
+ *o = append(*o, client.AsAlphabet())
+ }
+}
diff --git a/pkg/morph/client/frostfsid/keys.go b/pkg/morph/client/frostfsid/keys.go
new file mode 100644
index 000000000..3bae7adfb
--- /dev/null
+++ b/pkg/morph/client/frostfsid/keys.go
@@ -0,0 +1,54 @@
+package frostfsid
+
+import (
+ "crypto/elliptic"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+// AccountKeysPrm groups parameters of AccountKeys operation.
+type AccountKeysPrm struct {
+ id user.ID
+}
+
+// SetID sets owner ID.
+func (a *AccountKeysPrm) SetID(id user.ID) {
+ a.id = id
+}
+
+// AccountKeys requests public keys of FrostFS account from FrostFS ID contract.
+func (x *Client) AccountKeys(p AccountKeysPrm) (keys.PublicKeys, error) {
+ prm := client.TestInvokePrm{}
+ prm.SetMethod(keyListingMethod)
+ prm.SetArgs(p.id.WalletBytes())
+
+ items, err := x.client.TestInvoke(prm)
+ if err != nil {
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", keyListingMethod, err)
+ } else if ln := len(items); ln != 1 {
+ return nil, fmt.Errorf("unexpected stack item count (%s): %d", keyListingMethod, ln)
+ }
+
+ items, err = client.ArrayFromStackItem(items[0])
+ if err != nil {
+ return nil, fmt.Errorf("1st stack item must be an array (%s)", keyListingMethod)
+ }
+
+ pubs := make(keys.PublicKeys, len(items))
+ for i := range items {
+ rawPub, err := client.BytesFromStackItem(items[i])
+ if err != nil {
+ return nil, fmt.Errorf("invalid stack item, expected byte array (%s)", keyListingMethod)
+ }
+
+ pubs[i], err = keys.NewPublicKeyFromBytes(rawPub, elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("received invalid key (%s): %w", keyListingMethod, err)
+ }
+ }
+
+ return pubs, nil
+}
diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go
deleted file mode 100644
index 3a789672a..000000000
--- a/pkg/morph/client/frostfsid/subject.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package frostfsid
-
-import (
- "context"
- "fmt"
-
- frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-const (
- methodGetSubject = "getSubject"
- methodGetSubjectExtended = "getSubjectExtended"
-)
-
-func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
- prm := client.TestInvokePrm{}
- prm.SetMethod(methodGetSubject)
- prm.SetArgs(addr)
-
- res, err := c.client.TestInvoke(ctx, prm)
- if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err)
- }
-
- structArr, err := checkStackItem(res)
- if err != nil {
- return nil, fmt.Errorf("invalid test invocation result (%s): %w", methodGetSubjectExtended, err)
- }
-
- subj, err := frostfsidclient.ParseSubject(structArr)
- if err != nil {
- return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
- }
-
- return subj, nil
-}
-
-func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) {
- prm := client.TestInvokePrm{}
- prm.SetMethod(methodGetSubjectExtended)
- prm.SetArgs(addr)
-
- res, err := c.client.TestInvoke(ctx, prm)
- if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err)
- }
-
- structArr, err := checkStackItem(res)
- if err != nil {
- return nil, fmt.Errorf("invalid test invocation result (%s): %w", methodGetSubjectExtended, err)
- }
-
- subj, err := frostfsidclient.ParseSubjectExtended(structArr)
- if err != nil {
- return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err)
- }
-
- return subj, nil
-}
-
-func checkStackItem(res []stackitem.Item) (structArr []stackitem.Item, err error) {
- if ln := len(res); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", methodGetSubject, ln)
- }
-
- structArr, err = client.ArrayFromStackItem(res[0])
- if err != nil {
- return nil, fmt.Errorf("get item array of container (%s): %w", methodGetSubject, err)
- }
- return
-}
diff --git a/pkg/morph/client/mtls.go b/pkg/morph/client/mtls.go
deleted file mode 100644
index 3de51afe7..000000000
--- a/pkg/morph/client/mtls.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client
-
-import (
- "crypto/tls"
-
- "github.com/nspcc-dev/neo-go/pkg/rpcclient"
-)
-
-// MTLSConfig represents endpoint mTLS configuration.
-type MTLSConfig struct {
- TrustedCAList []string
- KeyFile string
- CertFile string
-}
-
-func (m *MTLSConfig) parse() (*tls.Config, error) {
- if m == nil {
- return nil, nil
- }
-
- return rpcclient.TLSClientConfig(m.TrustedCAList, m.CertFile, m.KeyFile)
-}
diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go
index b9e39c25e..e006ca69a 100644
--- a/pkg/morph/client/multi.go
+++ b/pkg/morph/client/multi.go
@@ -2,7 +2,6 @@ package client
import (
"context"
- "slices"
"sort"
"time"
@@ -12,9 +11,8 @@ import (
// Endpoint represents morph endpoint together with its priority.
type Endpoint struct {
- Address string
- Priority int
- MTLSConfig *MTLSConfig
+ Address string
+ Priority int
}
type endpoints struct {
@@ -40,11 +38,11 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
// Iterate endpoints in the order of decreasing priority.
for c.endpoints.curr = range c.endpoints.list {
- newEndpoint := c.endpoints.list[c.endpoints.curr]
+ newEndpoint := c.endpoints.list[c.endpoints.curr].Address
cli, act, err := c.newCli(ctx, newEndpoint)
if err != nil {
- c.logger.Warn(ctx, logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
- zap.String("endpoint", newEndpoint.Address),
+ c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
+ zap.String("endpoint", newEndpoint),
zap.Error(err),
)
@@ -53,8 +51,8 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
c.cache.invalidate()
- c.logger.Info(ctx, logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
- zap.String("endpoint", newEndpoint.Address))
+ c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
+ zap.String("endpoint", newEndpoint))
c.client = cli
c.setActor(act)
@@ -77,8 +75,6 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
}
func (c *Client) closeWaiter(ctx context.Context) {
- c.wg.Add(1)
- defer c.wg.Done()
select {
case <-ctx.Done():
case <-c.closeChan:
@@ -100,7 +96,8 @@ mainLoop:
case <-t.C:
c.switchLock.RLock()
- endpointsCopy := slices.Clone(c.endpoints.list)
+ endpointsCopy := make([]Endpoint, len(c.endpoints.list))
+ copy(endpointsCopy, c.endpoints.list)
currPriority := c.endpoints.list[c.endpoints.curr].Priority
highestPriority := c.endpoints.list[0].Priority
@@ -120,9 +117,9 @@ mainLoop:
tryE := e.Address
- cli, act, err := c.newCli(ctx, e)
+ cli, act, err := c.newCli(ctx, tryE)
if err != nil {
- c.logger.Warn(ctx, logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
+ c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
zap.String("endpoint", tryE),
zap.Error(err),
)
@@ -147,7 +144,7 @@ mainLoop:
c.switchLock.Unlock()
- c.logger.Info(ctx, logs.ClientSwitchedToTheHigherPriorityRPC,
+ c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC,
zap.String("endpoint", tryE))
return
diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go
index de8afbfb5..eafa097e9 100644
--- a/pkg/morph/client/netmap/client.go
+++ b/pkg/morph/client/netmap/client.go
@@ -52,7 +52,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
if err != nil {
- return nil, fmt.Errorf("create 'netmap' contract client: %w", err)
+ return nil, fmt.Errorf("can't create netmap static client: %w", err)
}
return &Client{client: sc}, nil
@@ -65,7 +65,15 @@ type Option func(*opts)
type opts []client.StaticClientOption
func defaultOpts() *opts {
- return &opts{client.TryNotary()}
+ return new(opts)
+}
+
+// TryNotary returns option to enable
+// notary invocation tries.
+func TryNotary() Option {
+ return func(o *opts) {
+ *o = append(*o, client.TryNotary())
+ }
}
// AsAlphabet returns option to sign main TX
diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go
index 3f6aed506..09ae09b51 100644
--- a/pkg/morph/client/netmap/config.go
+++ b/pkg/morph/client/netmap/config.go
@@ -1,7 +1,7 @@
package netmap
import (
- "context"
+ "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -11,8 +11,6 @@ import (
const (
MaxObjectSizeConfig = "MaxObjectSize"
- MaxECParityCountConfig = "MaxECParityCount"
- MaxECDataCountConfig = "MaxECDataCount"
EpochDurationConfig = "EpochDuration"
ContainerFeeConfig = "ContainerFee"
ContainerAliasFeeConfig = "ContainerAliasFee"
@@ -24,45 +22,75 @@ const (
// MaxObjectSize receives max object size configuration
// value through the Netmap contract call.
-func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, MaxObjectSizeConfig)
+func (c *Client) MaxObjectSize() (uint64, error) {
+ objectSize, err := c.readUInt64Config(MaxObjectSizeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err)
+ }
+
+ return objectSize, nil
}
// EpochDuration returns number of sidechain blocks per one FrostFS epoch.
-func (c *Client) EpochDuration(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, EpochDurationConfig)
+func (c *Client) EpochDuration() (uint64, error) {
+ epochDuration, err := c.readUInt64Config(EpochDurationConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err)
+ }
+
+ return epochDuration, nil
}
// ContainerFee returns fee paid by container owner to each alphabet node
// for container registration.
-func (c *Client) ContainerFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, ContainerFeeConfig)
+func (c *Client) ContainerFee() (uint64, error) {
+ fee, err := c.readUInt64Config(ContainerFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err)
+ }
+
+ return fee, nil
}
// ContainerAliasFee returns additional fee paid by container owner to each
// alphabet node for container nice name registration.
-func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, ContainerAliasFeeConfig)
+func (c *Client) ContainerAliasFee() (uint64, error) {
+ fee, err := c.readUInt64Config(ContainerAliasFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err)
+ }
+
+ return fee, nil
}
// HomomorphicHashDisabled returns global configuration value of homomorphic hashing
// settings.
//
// Returns (false, nil) if config key is not found in the contract.
-func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) {
- return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey)
+func (c *Client) HomomorphicHashDisabled() (bool, error) {
+ return c.readBoolConfig(HomomorphicHashingDisabledKey)
}
// InnerRingCandidateFee returns global configuration value of fee paid by
// node to be in inner ring candidates list.
-func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, IrCandidateFeeConfig)
+func (c *Client) InnerRingCandidateFee() (uint64, error) {
+ fee, err := c.readUInt64Config(IrCandidateFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err)
+ }
+
+ return fee, nil
}
// WithdrawFee returns global configuration value of fee paid by user to
// withdraw assets from FrostFS contract.
-func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) {
- return c.readUInt64Config(ctx, WithdrawFeeConfig)
+func (c *Client) WithdrawFee() (uint64, error) {
+ fee, err := c.readUInt64Config(WithdrawFeeConfig)
+ if err != nil {
+ return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err)
+ }
+
+ return fee, nil
}
// MaintenanceModeAllowed reads admission of "maintenance" state from the
@@ -70,32 +98,34 @@ func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) {
// that storage nodes are allowed to switch their state to "maintenance".
//
// By default, maintenance state is disallowed.
-func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) {
- return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig)
+func (c *Client) MaintenanceModeAllowed() (bool, error) {
+ return c.readBoolConfig(MaintenanceModeAllowedConfig)
}
-func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) {
- v, err := c.config(ctx, []byte(key))
- if err != nil {
- return 0, fmt.Errorf("read netconfig value '%s': %w", key, err)
- }
-
- bi, err := v.TryInteger()
+func (c *Client) readUInt64Config(key string) (uint64, error) {
+ v, err := c.config([]byte(key), IntegerAssert)
if err != nil {
return 0, err
}
- return bi.Uint64(), nil
+
+ // IntegerAssert is guaranteed to return int64 if the error is nil.
+ return uint64(v.(int64)), nil
}
// reads boolean value by the given key from the FrostFS network configuration
// stored in the Sidechain. Returns false if key is not presented.
-func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) {
- v, err := c.config(ctx, []byte(key))
+func (c *Client) readBoolConfig(key string) (bool, error) {
+ v, err := c.config([]byte(key), BoolAssert)
if err != nil {
- return false, fmt.Errorf("read netconfig value '%s': %w", key, err)
+ if errors.Is(err, ErrConfigNotFound) {
+ return false, nil
+ }
+
+ return false, fmt.Errorf("read boolean configuration value %s from the Sidechain: %w", key, err)
}
- return v.TryBool()
+ // BoolAssert is guaranteed to return bool if the error is nil.
+ return v.(bool), nil
}
// SetConfigPrm groups parameters of SetConfig operation.
@@ -123,14 +153,13 @@ func (s *SetConfigPrm) SetValue(value any) {
}
// SetConfig sets config field.
-func (c *Client) SetConfig(ctx context.Context, p SetConfigPrm) error {
+func (c *Client) SetConfig(p SetConfigPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(setConfigMethod)
prm.SetArgs(p.id, p.key, p.value)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
- return err
+ return c.client.Invoke(prm)
}
// RawNetworkParameter is a FrostFS network parameter which is transmitted but
@@ -166,14 +195,14 @@ type NetworkConfiguration struct {
}
// ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain.
-func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) {
+func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) {
var res NetworkConfiguration
prm := client.TestInvokePrm{}
prm.SetMethod(configListMethod)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return res, fmt.Errorf("test invoke (%s): %w",
+ return res, fmt.Errorf("could not perform test invocation (%s): %w",
configListMethod, err)
}
@@ -244,18 +273,22 @@ func bytesToBool(val []byte) bool {
return false
}
+// ErrConfigNotFound is returned when the requested key was not found
+// in the network config (returned value is `Null`).
+var ErrConfigNotFound = errors.New("config value not found")
+
// config performs the test invoke of get config value
// method of FrostFS Netmap contract.
//
// Returns ErrConfigNotFound if config key is not found in the contract.
-func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) {
+func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (any, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(configMethod)
prm.SetArgs(key)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w",
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w",
configMethod, err)
}
@@ -264,7 +297,26 @@ func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error)
configMethod, ln)
}
- return items[0], nil
+ if _, ok := items[0].(stackitem.Null); ok {
+ return nil, ErrConfigNotFound
+ }
+
+ return assert(items[0])
+}
+
+// IntegerAssert converts stack item to int64.
+func IntegerAssert(item stackitem.Item) (any, error) {
+ return client.IntFromStackItem(item)
+}
+
+// StringAssert converts stack item to string.
+func StringAssert(item stackitem.Item) (any, error) {
+ return client.StringFromStackItem(item)
+}
+
+// BoolAssert converts stack item to bool.
+func BoolAssert(item stackitem.Item) (any, error) {
+ return client.BoolFromStackItem(item)
}
// iterateRecords iterates over all config records and passes them to f.
diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go
index 8561329ec..92d569ae2 100644
--- a/pkg/morph/client/netmap/epoch.go
+++ b/pkg/morph/client/netmap/epoch.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -9,13 +8,13 @@ import (
// Epoch receives number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) Epoch(ctx context.Context) (uint64, error) {
+func (c *Client) Epoch() (uint64, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(epochMethod)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return 0, fmt.Errorf("test invoke (%s): %w",
+ return 0, fmt.Errorf("could not perform test invocation (%s): %w",
epochMethod, err)
}
@@ -26,20 +25,20 @@ func (c *Client) Epoch(ctx context.Context) (uint64, error) {
num, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("get number from stack item (%s): %w", epochMethod, err)
+ return 0, fmt.Errorf("could not get number from stack item (%s): %w", epochMethod, err)
}
return uint64(num), nil
}
// LastEpochBlock receives block number of current FrostFS epoch
// through the Netmap contract call.
-func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) {
+func (c *Client) LastEpochBlock() (uint32, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(lastEpochBlockMethod)
- items, err := c.client.TestInvoke(ctx, prm)
+ items, err := c.client.TestInvoke(prm)
if err != nil {
- return 0, fmt.Errorf("test invoke (%s): %w",
+ return 0, fmt.Errorf("could not perform test invocation (%s): %w",
lastEpochBlockMethod, err)
}
@@ -50,7 +49,7 @@ func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) {
block, err := client.IntFromStackItem(items[0])
if err != nil {
- return 0, fmt.Errorf("get number from stack item (%s): %w",
+ return 0, fmt.Errorf("could not get number from stack item (%s): %w",
lastEpochBlockMethod, err)
}
return uint32(block), nil
diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go
index 0e1f9186b..742165b9a 100644
--- a/pkg/morph/client/netmap/innerring.go
+++ b/pkg/morph/client/netmap/innerring.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"crypto/elliptic"
"fmt"
@@ -24,7 +23,7 @@ func (u *UpdateIRPrm) SetKeys(keys keys.PublicKeys) {
}
// UpdateInnerRing updates inner ring keys.
-func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error {
+func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
args := make([][]byte, len(p.keys))
for i := range args {
args[i] = p.keys[i].Bytes()
@@ -35,18 +34,17 @@ func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error {
prm.SetArgs(args)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(ctx, prm)
- return err
+ return c.client.Invoke(prm)
}
// GetInnerRingList return current IR list.
-func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) {
+func (c *Client) GetInnerRingList() (keys.PublicKeys, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(innerRingListMethod)
- prms, err := c.client.TestInvoke(ctx, invokePrm)
+ prms, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", innerRingListMethod, err)
}
return irKeysFromStackItem(prms, innerRingListMethod)
@@ -59,7 +57,7 @@ func irKeysFromStackItem(stack []stackitem.Item, method string) (keys.PublicKeys
irs, err := client.ArrayFromStackItem(stack[0])
if err != nil {
- return nil, fmt.Errorf("get stack item array from stack item (%s): %w", method, err)
+ return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err)
}
irKeys := make(keys.PublicKeys, len(irs))
@@ -79,7 +77,7 @@ const irNodeFixedPrmNumber = 1
func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
prms, err := client.ArrayFromStackItem(prm)
if err != nil {
- return nil, fmt.Errorf("get stack item array (IRNode): %w", err)
+ return nil, fmt.Errorf("could not get stack item array (IRNode): %w", err)
} else if ln := len(prms); ln != irNodeFixedPrmNumber {
return nil, fmt.Errorf(
"unexpected stack item count (IRNode): expected %d, has %d",
@@ -90,7 +88,7 @@ func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) {
byteKey, err := client.BytesFromStackItem(prms[0])
if err != nil {
- return nil, fmt.Errorf("parse bytes from stack item (IRNode): %w", err)
+ return nil, fmt.Errorf("could not parse bytes from stack item (IRNode): %w", err)
}
return keys.NewPublicKeyFromBytes(byteKey, elliptic.P256())
diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go
index 97782fc25..61bbf5f17 100644
--- a/pkg/morph/client/netmap/netmap.go
+++ b/pkg/morph/client/netmap/netmap.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"fmt"
netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
@@ -12,14 +11,14 @@ import (
// GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and
// decodes netmap.NetMap from the response.
-func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(epochSnapshotMethod)
invokePrm.SetArgs(epoch)
- res, err := c.client.TestInvoke(ctx, invokePrm)
+ res, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w",
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w",
epochSnapshotMethod, err)
}
@@ -35,13 +34,13 @@ func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.Ne
// GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo
// from the response.
-func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) {
+func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapCandidatesMethod)
- res, err := c.client.TestInvoke(ctx, invokePrm)
+ res, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err)
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w", netMapCandidatesMethod, err)
}
if len(res) > 0 {
@@ -52,13 +51,13 @@ func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) {
}
// NetMap calls "netmap" method and decode netmap.NetMap from the response.
-func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) {
+func (c *Client) NetMap() (*netmap.NetMap, error) {
invokePrm := client.TestInvokePrm{}
invokePrm.SetMethod(netMapMethod)
- res, err := c.client.TestInvoke(ctx, invokePrm)
+ res, err := c.client.TestInvoke(invokePrm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w",
+ return nil, fmt.Errorf("could not perform test invocation (%s): %w",
netMapMethod, err)
}
@@ -137,11 +136,11 @@ func decodeNodeInfo(dst *netmap.NodeInfo, itemNode stackitem.Item) error {
default:
return fmt.Errorf("unsupported state %v", node.State)
case netmapcontract.NodeStateOnline:
- dst.SetStatus(netmap.Online)
+ dst.SetOnline()
case netmapcontract.NodeStateOffline:
- dst.SetStatus(netmap.Offline)
+ dst.SetOffline()
case netmapcontract.NodeStateMaintenance:
- dst.SetStatus(netmap.Maintenance)
+ dst.SetMaintenance()
}
return nil
diff --git a/pkg/morph/client/netmap/netmap_test.go b/pkg/morph/client/netmap/netmap_test.go
index e686e271e..a8a306197 100644
--- a/pkg/morph/client/netmap/netmap_test.go
+++ b/pkg/morph/client/netmap/netmap_test.go
@@ -20,11 +20,11 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
switch i % 3 {
default:
- expected[i].SetStatus(netmap.Offline)
+ expected[i].SetOffline()
case int(netmapcontract.NodeStateOnline):
- expected[i].SetStatus(netmap.Online)
+ expected[i].SetOnline()
case int(netmapcontract.NodeStateMaintenance):
- expected[i].SetStatus(netmap.Maintenance)
+ expected[i].SetMaintenance()
}
expected[i].SetPublicKey(pub)
@@ -38,12 +38,12 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
var state int64
- switch expected[i].Status() {
- case netmap.Online:
+ switch {
+ case expected[i].IsOnline():
state = int64(netmapcontract.NodeStateOnline)
- case netmap.Offline:
+ case expected[i].IsOffline():
state = int64(netmapcontract.NodeStateOffline)
- case netmap.Maintenance:
+ case expected[i].IsMaintenance():
state = int64(netmapcontract.NodeStateMaintenance)
}
diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go
index 341b20935..7a63f14d7 100644
--- a/pkg/morph/client/netmap/new_epoch.go
+++ b/pkg/morph/client/netmap/new_epoch.go
@@ -1,7 +1,6 @@
package netmap
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -9,32 +8,17 @@ import (
// NewEpoch updates FrostFS epoch number through
// Netmap contract call.
-func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error {
+// If `force` is true, this call is normally initiated by a control
+// service command and uses a control notary transaction internally
+// to ensure all nodes produce the same transaction with high probability.
+func (c *Client) NewEpoch(epoch uint64, force bool) error {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
+ prm.SetControlTX(force)
- _, err := c.client.Invoke(ctx, prm)
- if err != nil {
- return fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
+ if err := c.client.Invoke(prm); err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
}
return nil
}
-
-// NewEpochControl updates FrostFS epoch number through
-// control notary transaction internally to ensure all
-// nodes produce the same transaction with high probability.
-// If vub > 0, vub will be used as valid until block value.
-func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) (uint32, error) {
- prm := client.InvokePrm{}
- prm.SetMethod(newEpochMethod)
- prm.SetArgs(epoch)
- prm.SetControlTX(true)
- prm.SetVUB(vub)
-
- res, err := c.client.Invoke(ctx, prm)
- if err != nil {
- return 0, fmt.Errorf("invoke method (%s): %w", newEpochMethod, err)
- }
- return res.VUB, nil
-}
diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go
index e83acde39..7ceaa0250 100644
--- a/pkg/morph/client/netmap/peer.go
+++ b/pkg/morph/client/netmap/peer.go
@@ -1,16 +1,12 @@
package netmap
import (
- "context"
- "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
-var errFailedToRemovePeerWithoutNotary = errors.New("peer can be forcefully removed only in notary environment")
-
// AddPeerPrm groups parameters of AddPeer operation.
type AddPeerPrm struct {
nodeInfo netmap.NodeInfo
@@ -25,8 +21,8 @@ func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) {
// AddPeer registers peer in FrostFS network through
// Netmap contract call.
-func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error {
- method := addPeerMethod
+func (c *Client) AddPeer(p AddPeerPrm) error {
+ var method = addPeerMethod
if c.client.WithNotary() && c.client.IsAlpha() {
// In notary environments Alphabet must calls AddPeerIR method instead of AddPeer.
@@ -40,27 +36,24 @@ func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error {
prm.SetArgs(p.nodeInfo.Marshal())
prm.InvokePrmOptional = p.InvokePrmOptional
- if _, err := c.client.Invoke(ctx, prm); err != nil {
- return fmt.Errorf("invoke method (%s): %w", method, err)
+ if err := c.client.Invoke(prm); err != nil {
+ return fmt.Errorf("could not invoke method (%s): %w", method, err)
}
return nil
}
// ForceRemovePeer marks the given peer as offline via a notary control transaction.
-// If vub > 0, vub will be used as valid until block value.
-func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
+func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo) error {
if !c.client.WithNotary() {
- return 0, errFailedToRemovePeerWithoutNotary
+ return fmt.Errorf("peer can be forcefully removed only in notary environment")
}
prm := UpdatePeerPrm{}
prm.SetKey(nodeInfo.PublicKey())
prm.SetControlTX(true)
- prm.SetVUB(vub)
- res, err := c.UpdatePeerState(ctx, prm)
- if err != nil {
- return 0, fmt.Errorf("updating peer state: %v", err)
+ if err := c.UpdatePeerState(prm); err != nil {
+ return fmt.Errorf("updating peer state: %v", err)
}
- return res.VUB, nil
+ return nil
}
diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go
index 9dbec1a90..ba2c26af7 100644
--- a/pkg/morph/client/netmap/snapshot.go
+++ b/pkg/morph/client/netmap/snapshot.go
@@ -1,22 +1,19 @@
package netmap
import (
- "context"
- "fmt"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
// GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response.
-func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(snapshotMethod)
prm.SetArgs(diff)
- res, err := c.client.TestInvoke(ctx, prm)
+ res, err := c.client.TestInvoke(prm)
if err != nil {
- return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err)
+ return nil, err
}
return DecodeNetMap(res)
diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go
index f9f639c19..02967453b 100644
--- a/pkg/morph/client/netmap/update_state.go
+++ b/pkg/morph/client/netmap/update_state.go
@@ -1,7 +1,7 @@
package netmap
import (
- "context"
+ "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -36,7 +36,7 @@ func (u *UpdatePeerPrm) SetMaintenance() {
}
// UpdatePeerState changes peer status through Netmap contract call.
-func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.InvokeRes, error) {
+func (c *Client) UpdatePeerState(p UpdatePeerPrm) error {
method := updateStateMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -55,5 +55,8 @@ func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.I
prm.SetArgs(int64(p.state), p.key)
prm.InvokePrmOptional = p.InvokePrmOptional
- return c.client.Invoke(ctx, prm)
+ if err := c.client.Invoke(prm); err != nil {
+ return fmt.Errorf("could not invoke smart contract: %w", err)
+ }
+ return nil
}
diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go
index bc00eb889..758b220a2 100644
--- a/pkg/morph/client/nns.go
+++ b/pkg/morph/client/nns.go
@@ -8,12 +8,14 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
- nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
)
const (
@@ -31,12 +33,14 @@ const (
NNSProxyContractName = "proxy.frostfs"
// NNSGroupKeyName is a name for the FrostFS group key record in NNS.
NNSGroupKeyName = "group.frostfs"
- // NNSPolicyContractName is a name of the policy contract in NNS.
- NNSPolicyContractName = "policy.frostfs"
)
-// ErrNNSRecordNotFound means that there is no such record in NNS contract.
-var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
+var (
+ // ErrNNSRecordNotFound means that there is no such record in NNS contract.
+ ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
+
+ errEmptyResultStack = errors.New("returned result stack is empty")
+)
// NNSAlphabetContractName returns contract name of the alphabet contract in NNS
// based on alphabet index.
@@ -55,36 +59,97 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) {
return util.Uint160{}, ErrConnectionLost
}
- sh, err = nnsResolve(c.nnsReader, name)
+ nnsHash, err := c.NNSHash()
+ if err != nil {
+ return util.Uint160{}, err
+ }
+
+ sh, err = nnsResolve(c.client, nnsHash, name)
if err != nil {
return sh, fmt.Errorf("NNS.resolve: %w", err)
}
return sh, nil
}
-func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) {
- available, err := r.IsAvailable(domain)
- if err != nil {
- return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
+// NNSHash returns NNS contract hash.
+func (c *Client) NNSHash() (util.Uint160, error) {
+ c.switchLock.RLock()
+ defer c.switchLock.RUnlock()
+
+ if c.inactive {
+ return util.Uint160{}, ErrConnectionLost
}
- if available {
+ success := false
+ startedAt := time.Now()
+
+ defer func() {
+ c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt))
+ }()
+
+ nnsHash := c.cache.nns()
+
+ if nnsHash == nil {
+ cs, err := c.client.GetContractStateByID(nnsContractID)
+ if err != nil {
+ return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err)
+ }
+
+ c.cache.setNNSHash(cs.Hash)
+ nnsHash = &cs.Hash
+ }
+ success = true
+ return *nnsHash, nil
+}
+
+func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
+ found, err := exists(c, nnsHash, domain)
+ if err != nil {
+ return nil, fmt.Errorf("could not check presence in NNS contract for %s: %w", domain, err)
+ }
+
+ if !found {
return nil, ErrNNSRecordNotFound
}
- return r.Resolve(domain, big.NewInt(int64(nns.TXT)))
+ result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{
+ {
+ Type: smartcontract.StringType,
+ Value: domain,
+ },
+ {
+ Type: smartcontract.IntegerType,
+ Value: big.NewInt(int64(nns.TXT)),
+ },
+ }, nil)
+ if err != nil {
+ return nil, err
+ }
+ if result.State != vmstate.Halt.String() {
+ return nil, fmt.Errorf("invocation failed: %s", result.FaultException)
+ }
+ if len(result.Stack) == 0 {
+ return nil, errEmptyResultStack
+ }
+ return result.Stack[0], nil
}
-func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) {
- arr, err := nnsResolveItem(r, domain)
+func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) {
+ res, err := nnsResolveItem(c, nnsHash, domain)
if err != nil {
return util.Uint160{}, err
}
- if len(arr) == 0 {
- return util.Uint160{}, errors.New("NNS record is missing")
+ // Parse the result of resolving NNS record.
+ // It works with multiple formats (corresponding to multiple NNS versions).
+ // If array of hashes is provided, it returns only the first one.
+ if arr, ok := res.Value().([]stackitem.Item); ok {
+ if len(arr) == 0 {
+ return util.Uint160{}, errors.New("NNS record is missing")
+ }
+ res = arr[0]
}
- bs, err := arr[0].TryBytes()
+ bs, err := res.TryBytes()
if err != nil {
return util.Uint160{}, fmt.Errorf("malformed response: %w", err)
}
@@ -104,6 +169,33 @@ func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error
return util.Uint160{}, errors.New("no valid hashes are found")
}
+func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) {
+ result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{
+ {
+ Type: smartcontract.StringType,
+ Value: domain,
+ },
+ }, nil)
+ if err != nil {
+ return false, err
+ }
+
+ if len(result.Stack) == 0 {
+ return false, errEmptyResultStack
+ }
+
+ res := result.Stack[0]
+
+ available, err := res.TryBool()
+ if err != nil {
+ return false, fmt.Errorf("malformed response: %w", err)
+ }
+
+ // not available means that it is taken
+ // and, therefore, exists
+ return !available, nil
+}
+
// SetGroupSignerScope makes the default signer scope include all FrostFS contracts.
// Should be called for side-chain client only.
func (c *Client) SetGroupSignerScope() error {
@@ -147,12 +239,18 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) {
return gKey, nil
}
- arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName)
+ nnsHash, err := c.NNSHash()
if err != nil {
return nil, err
}
- if len(arr) == 0 {
+ item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName)
+ if err != nil {
+ return nil, err
+ }
+
+ arr, ok := item.Value().([]stackitem.Item)
+ if !ok || len(arr) == 0 {
return nil, errors.New("NNS record is missing")
}
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 448702613..17644361a 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -1,7 +1,6 @@
package client
import (
- "context"
"crypto/elliptic"
"encoding/binary"
"errors"
@@ -38,7 +37,8 @@ type (
alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness
- proxy util.Uint160
+ notary util.Uint160
+ proxy util.Uint160
}
notaryCfg struct {
@@ -57,11 +57,16 @@ const (
defaultNotaryValidTime = 50
defaultNotaryRoundTime = 100
- setDesignateMethod = "designateAsRole"
+ notaryBalanceOfMethod = "balanceOf"
+ notaryExpirationOfMethod = "expirationOf"
+ setDesignateMethod = "designateAsRole"
+ notaryBalanceErrMsg = "can't fetch notary balance"
notaryNotEnabledPanicMsg = "notary support was not enabled on this client"
)
+var errUnexpectedItems = errors.New("invalid number of NEO VM arguments on stack")
+
func defaultNotaryConfig(c *Client) *notaryCfg {
return ¬aryCfg{
txValidTime: defaultNotaryValidTime,
@@ -101,6 +106,7 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error {
txValidTime: cfg.txValidTime,
roundTime: cfg.roundTime,
alphabetSource: cfg.alphabetSource,
+ notary: notary.Hash,
}
c.notary = notaryCfg
@@ -134,7 +140,7 @@ func (c *Client) ProbeNotary() (res bool) {
// use this function.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta uint32) (util.Uint256, error) {
+func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uint256, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -148,18 +154,20 @@ func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err)
+ return util.Uint256{}, fmt.Errorf("can't get blockchain height: %w", err)
}
- r := notary.NewReader(c.rpcActor)
- currentTill, err := r.ExpirationOf(c.acc.PrivateKey().GetScriptHash())
+ currentTill, err := c.depositExpirationOf()
if err != nil {
- return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err)
+ return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err)
}
- till := max(int64(bc+delta), int64(currentTill))
- res, _, err := c.depositNotary(ctx, amount, till)
- return res, err
+ till := int64(bc + delta)
+ if till < currentTill {
+ till = currentTill
+ }
+
+ return c.depositNotary(amount, till)
}
// DepositEndlessNotary calls notary deposit method. Unlike `DepositNotary`,
@@ -167,12 +175,12 @@ func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta
// This allows to avoid ValidAfterDeposit failures.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) (util.Uint256, uint32, error) {
+func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (res util.Uint256, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return util.Uint256{}, 0, ErrConnectionLost
+ return util.Uint256{}, ErrConnectionLost
}
if c.notary == nil {
@@ -180,37 +188,37 @@ func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8)
}
// till value refers to a block height and it is uint32 value in neo-go
- return c.depositNotary(ctx, amount, math.MaxUint32)
+ return c.depositNotary(amount, math.MaxUint32)
}
-func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
+func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint256, err error) {
txHash, vub, err := c.gasToken.Transfer(
c.accAddr,
- notary.Hash,
+ c.notary.notary,
big.NewInt(int64(amount)),
[]any{c.acc.PrivateKey().GetScriptHash(), till})
if err != nil {
if !errors.Is(err, neorpc.ErrAlreadyExists) {
- return util.Uint256{}, 0, fmt.Errorf("make notary deposit: %w", err)
+ return util.Uint256{}, fmt.Errorf("can't make notary deposit: %w", err)
}
// Transaction is already in mempool waiting to be processed.
// This is an expected situation if we restart the service.
- c.logger.Info(ctx, logs.ClientNotaryDepositHasAlreadyBeenMade,
+ c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
zap.Error(err))
- return util.Uint256{}, 0, nil
+ return util.Uint256{}, nil
}
- c.logger.Info(ctx, logs.ClientNotaryDepositInvoke,
+ c.logger.Info(logs.ClientNotaryDepositInvoke,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
- return txHash, vub, nil
+ return txHash, nil
}
// GetNotaryDeposit returns deposit of client's account in notary contract.
@@ -231,10 +239,18 @@ func (c *Client) GetNotaryDeposit() (res int64, err error) {
sh := c.acc.PrivateKey().PublicKey().GetScriptHash()
- r := notary.NewReader(c.rpcActor)
- bigIntDeposit, err := r.BalanceOf(sh)
+ items, err := c.TestInvoke(c.notary.notary, notaryBalanceOfMethod, sh)
if err != nil {
- return 0, fmt.Errorf("get notary deposit: %w", err)
+ return 0, fmt.Errorf("%v: %w", notaryBalanceErrMsg, err)
+ }
+
+ if len(items) != 1 {
+ return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, errUnexpectedItems))
+ }
+
+ bigIntDeposit, err := items[0].TryInteger()
+ if err != nil {
+ return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, err))
}
return bigIntDeposit.Int64(), nil
@@ -261,7 +277,7 @@ func (u *UpdateNotaryListPrm) SetHash(hash util.Uint256) {
// committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) error {
+func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -273,13 +289,12 @@ func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm)
panic(notaryNotEnabledPanicMsg)
}
- nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
+ nonce, vub, err := c.CalculateNonceAndVUB(prm.hash)
if err != nil {
- return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
- ctx,
setDesignateMethod,
nonce,
vub,
@@ -310,7 +325,7 @@ func (u *UpdateAlphabetListPrm) SetHash(hash util.Uint256) {
// Requires committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabetListPrm) error {
+func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -322,13 +337,12 @@ func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabet
panic(notaryNotEnabledPanicMsg)
}
- nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash)
+ nonce, vub, err := c.CalculateNonceAndVUB(prm.hash)
if err != nil {
- return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err)
+ return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err)
}
return c.notaryInvokeAsCommittee(
- ctx,
setDesignateMethod,
nonce,
vub,
@@ -341,22 +355,20 @@ func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabet
// blockchain. Fallback tx is a `RET`. If Notary support is not enabled
// it fallbacks to a simple `Invoke()`.
//
-// Returns valid until block value.
-//
// `nonce` and `vub` are used only if notary is enabled.
-func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
+func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return InvokeRes{}, ErrConnectionLost
+ return ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(ctx, contract, fee, method, args...)
+ return c.Invoke(contract, fee, method, args...)
}
- return c.notaryInvoke(ctx, false, true, contract, nonce, vub, method, args...)
+ return c.notaryInvoke(false, true, contract, nonce, vub, method, args...)
}
// NotaryInvokeNotAlpha does the same as NotaryInvoke but does not use client's
@@ -364,19 +376,19 @@ func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fi
// not expected to be signed by the current node.
//
// Considered to be used by non-IR nodes.
-func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (InvokeRes, error) {
+func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
- return InvokeRes{}, ErrConnectionLost
+ return ErrConnectionLost
}
if c.notary == nil {
- return c.Invoke(ctx, contract, fee, method, args...)
+ return c.Invoke(contract, fee, method, args...)
}
- return c.notaryInvoke(ctx, false, false, contract, rand.Uint32(), vubP, method, args...)
+ return c.notaryInvoke(false, false, contract, rand.Uint32(), nil, method, args...)
}
// NotarySignAndInvokeTX signs and sends notary request that was received from
@@ -393,7 +405,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return fmt.Errorf("fetch current alphabet keys: %w", err)
+ return fmt.Errorf("could not fetch current alphabet keys: %w", err)
}
cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList)
@@ -418,7 +430,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return err
}
- c.logger.Debug(context.Background(), logs.ClientNotaryRequestWithPreparedMainTXInvoked,
+ c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked,
zap.String("tx_hash", mainH.StringLE()),
zap.Uint32("valid_until_block", untilActual),
zap.String("fallback_hash", fbH.StringLE()))
@@ -426,13 +438,12 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return nil
}
-func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, nonce, vub uint32, args ...any) error {
+func (c *Client) notaryInvokeAsCommittee(method string, nonce, vub uint32, args ...any) error {
designate := c.GetDesignateHash()
- _, err := c.notaryInvoke(ctx, true, true, designate, nonce, &vub, method, args...)
- return err
+ return c.notaryInvoke(true, true, designate, nonce, &vub, method, args...)
}
-func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) {
+func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) error {
start := time.Now()
success := false
defer func() {
@@ -441,27 +452,27 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo
alphabetList, err := c.notary.alphabetSource()
if err != nil {
- return InvokeRes{}, err
+ return err
}
until, err := c.getUntilValue(vub)
if err != nil {
- return InvokeRes{}, err
+ return err
}
cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee)
if err != nil {
- return InvokeRes{}, err
+ return err
}
nAct, err := notary.NewActor(c.client, cosigners, c.acc)
if err != nil {
- return InvokeRes{}, err
+ return err
}
mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != vmstate.Halt.String() {
- return ¬HaltStateError{state: r.State, exception: r.FaultException}
+ return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException})
}
t.ValidUntilBlock = until
@@ -471,17 +482,17 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo
}, args...))
if err != nil && !alreadyOnChainError(err) {
- return InvokeRes{}, err
+ return err
}
- c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked,
+ c.logger.Debug(logs.ClientNotaryRequestInvoked,
zap.String("method", method),
zap.Uint32("valid_until_block", untilActual),
zap.String("tx_hash", mainH.StringLE()),
zap.String("fallback_hash", fbH.StringLE()))
success = true
- return InvokeRes{Hash: mainH, VUB: until}, nil
+ return nil
}
func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabetList keys.PublicKeys) ([]actor.SignerAccount, error) {
@@ -515,24 +526,24 @@ func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabet
if ok {
pub, err := keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("parse verification script of signer #2: invalid public key: %w", err)
+ return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key: %w", err)
}
acc = notary.FakeSimpleAccount(pub)
} else {
m, pubsBytes, ok := vm.ParseMultiSigContract(script)
if !ok {
- return nil, errors.New("parse verification script of signer #2: unknown witness type")
+ return nil, errors.New("failed to parse verification script of signer #2: unknown witness type")
}
pubs := make(keys.PublicKeys, len(pubsBytes))
for i := range pubs {
pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256())
if err != nil {
- return nil, fmt.Errorf("parse verification script of signer #2: invalid public key #%d: %w", i, err)
+ return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key #%d: %w", i, err)
}
}
acc, err = notary.FakeMultisigAccount(m, pubs)
if err != nil {
- return nil, fmt.Errorf("create fake account for signer #2: %w", err)
+ return nil, fmt.Errorf("failed to create fake account for signer #2: %w", err)
}
}
}
@@ -555,11 +566,7 @@ func (c *Client) notaryCosigners(invokedByAlpha bool, ir []*keys.PublicKey, comm
s[0] = actor.SignerAccount{
Signer: transaction.Signer{
Account: c.notary.proxy,
- // Do not change this:
- // We must be able to call NNS contract indirectly from the Container contract.
- // Thus, CalledByEntry is not sufficient.
- // In future we may restrict this to all the usecases we have.
- Scopes: transaction.Global,
+ Scopes: transaction.None,
},
Account: notary.FakeContractAccount(c.notary.proxy),
}
@@ -608,7 +615,8 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey())
err := multisigAccount.ConvertMultisig(m, ir)
if err != nil {
- return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err)
+ // wrap error as FrostFS-specific since the call is not related to any client
+ return nil, wrapFrostFSError(fmt.Errorf("can't convert account to inner ring multisig wallet: %w", err))
}
} else {
// alphabet multisig redeem script is
@@ -616,7 +624,8 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
// inner ring multiaddress witness
multisigAccount, err = notary.FakeMultisigAccount(m, ir)
if err != nil {
- return nil, fmt.Errorf("make inner ring multisig wallet: %w", err)
+ // wrap error as FrostFS-specific since the call is not related to any client
+ return nil, wrapFrostFSError(fmt.Errorf("can't make inner ring multisig wallet: %w", err))
}
}
@@ -626,15 +635,33 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB
func (c *Client) notaryTxValidationLimit() (uint32, error) {
bc, err := c.rpcActor.GetBlockCount()
if err != nil {
- return 0, fmt.Errorf("get current blockchain height: %w", err)
+ return 0, fmt.Errorf("can't get current blockchain height: %w", err)
}
- minTime := bc + c.notary.txValidTime
- rounded := (minTime/c.notary.roundTime + 1) * c.notary.roundTime
+ min := bc + c.notary.txValidTime
+ rounded := (min/c.notary.roundTime + 1) * c.notary.roundTime
return rounded, nil
}
+func (c *Client) depositExpirationOf() (int64, error) {
+ expirationRes, err := c.TestInvoke(c.notary.notary, notaryExpirationOfMethod, c.acc.PrivateKey().GetScriptHash())
+ if err != nil {
+ return 0, fmt.Errorf("can't invoke method: %w", err)
+ }
+
+ if len(expirationRes) != 1 {
+ return 0, fmt.Errorf("method returned unexpected item count: %d", len(expirationRes))
+ }
+
+ currentTillBig, err := expirationRes[0].TryInteger()
+ if err != nil {
+ return 0, fmt.Errorf("can't parse deposit till value: %w", err)
+ }
+
+ return currentTillBig.Int64(), nil
+}
+
// sigCount returns the number of required signature.
// For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT).
// If committee is true, returns M as N/2+1.
@@ -679,17 +706,13 @@ func WithProxyContract(h util.Uint160) NotaryOption {
}
}
-// Neo RPC node can return `neorpc.ErrInvalidAttribute` error with
+// Neo RPC node can return `core.ErrInvalidAttribute` error with
// `conflicting transaction <> is already on chain` message. This
// error is expected and ignored. As soon as main tx persisted on
// chain everything is fine. This happens because notary contract
// requires 5 out of 7 signatures to send main tx, thus last two
// notary requests may be processed after main tx appeared on chain.
func alreadyOnChainError(err error) bool {
- if !errors.Is(err, neorpc.ErrInvalidAttribute) {
- return false
- }
-
const alreadyOnChainErrorMessage = "already on chain"
return strings.Contains(err.Error(), alreadyOnChainErrorMessage)
@@ -708,12 +731,12 @@ func alreadyOnChainError(err error) bool {
func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed8, error) {
notaryBalance, err := c.GetNotaryDeposit()
if err != nil {
- return 0, fmt.Errorf("get notary balance: %w", err)
+ return 0, fmt.Errorf("could not get notary balance: %w", err)
}
gasBalance, err := c.GasBalance()
if err != nil {
- return 0, fmt.Errorf("get GAS balance: %w", err)
+ return 0, fmt.Errorf("could not get GAS balance: %w", err)
}
if gasBalance == 0 {
@@ -733,19 +756,17 @@ func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed
// CalculateNonceAndVUB calculates nonce and ValidUntilBlock values
// based on transaction hash.
-func (c *Client) CalculateNonceAndVUB(hash *util.Uint256) (nonce uint32, vub uint32, err error) {
+func (c *Client) CalculateNonceAndVUB(hash util.Uint256) (nonce uint32, vub uint32, err error) {
return c.calculateNonceAndVUB(hash, false)
}
// CalculateNonceAndVUBControl calculates nonce and rounded ValidUntilBlock values
// based on transaction hash for use in control transactions.
-func (c *Client) CalculateNonceAndVUBControl(hash *util.Uint256) (nonce uint32, vub uint32, err error) {
+func (c *Client) CalculateNonceAndVUBControl(hash util.Uint256) (nonce uint32, vub uint32, err error) {
return c.calculateNonceAndVUB(hash, true)
}
-// If hash specified, transaction's height and hash are used to compute VUB and nonce.
-// If not, then current block height used to compute VUB and nonce.
-func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool) (nonce uint32, vub uint32, err error) {
+func (c *Client) calculateNonceAndVUB(hash util.Uint256, roundBlockHeight bool) (nonce uint32, vub uint32, err error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -757,18 +778,11 @@ func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool)
return 0, 0, nil
}
- var height uint32
+ nonce = binary.LittleEndian.Uint32(hash.BytesLE())
- if hash != nil {
- height, err = c.getTransactionHeight(*hash)
- if err != nil {
- return 0, 0, fmt.Errorf("get transaction height: %w", err)
- }
- } else {
- height, err = c.rpcActor.GetBlockCount()
- if err != nil {
- return 0, 0, fmt.Errorf("get chain height: %w", err)
- }
+ height, err := c.getTransactionHeight(hash)
+ if err != nil {
+ return 0, 0, fmt.Errorf("could not get transaction height: %w", err)
}
// For control transactions, we round down the block height to control the
@@ -779,10 +793,7 @@ func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool)
height = height / inc * inc
}
- if hash != nil {
- return binary.LittleEndian.Uint32(hash.BytesLE()), height + c.notary.txValidTime, nil
- }
- return height + c.notary.txValidTime, height + c.notary.txValidTime, nil
+ return nonce, height + c.notary.txValidTime, nil
}
func (c *Client) getTransactionHeight(h util.Uint256) (uint32, error) {
diff --git a/pkg/morph/client/notifications.go b/pkg/morph/client/notifications.go
index 35204bb36..121dccfb7 100644
--- a/pkg/morph/client/notifications.go
+++ b/pkg/morph/client/notifications.go
@@ -16,10 +16,7 @@ func (c *Client) Close() {
// closing should be done via the channel
// to prevent switching to another RPC node
// in the notification loop
- if c.closed.CompareAndSwap(false, true) {
- close(c.closeChan)
- }
- c.wg.Wait()
+ close(c.closeChan)
}
// ReceiveExecutionNotifications performs subscription for notifications
@@ -73,7 +70,7 @@ func (c *Client) ReceiveNotaryRequests(txSigner util.Uint160, ch chan<- *result.
return "", ErrConnectionLost
}
- return c.client.ReceiveNotaryRequests(&neorpc.NotaryRequestFilter{Signer: &txSigner}, ch)
+ return c.client.ReceiveNotaryRequests(&neorpc.TxFilter{Signer: &txSigner}, ch)
}
// Unsubscribe performs unsubscription for the given subscription ID.
diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go
index c4eb120d2..7aa17a70f 100644
--- a/pkg/morph/client/static.go
+++ b/pkg/morph/client/static.go
@@ -1,10 +1,8 @@
package client
import (
- "context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -102,8 +100,6 @@ type InvokePrmOptional struct {
// It's only used by notary transactions and it affects only the
// computation of `validUntilBlock` values.
controlTX bool
- // vub is used to set custom valid until block value.
- vub uint32
}
// SetHash sets optional hash of the transaction.
@@ -119,21 +115,6 @@ func (i *InvokePrmOptional) SetControlTX(b bool) {
i.controlTX = b
}
-// IsControl gets whether a control transaction will be used.
-func (i *InvokePrmOptional) IsControl() bool {
- return i.controlTX
-}
-
-// SetVUB sets valid until block value.
-func (i *InvokePrmOptional) SetVUB(v uint32) {
- i.vub = v
-}
-
-type InvokeRes struct {
- Hash util.Uint256
- VUB uint32
-}
-
// Invoke calls Invoke method of Client with static internal script hash and fee.
// Supported args types are the same as in Client.
//
@@ -143,45 +124,36 @@ type InvokeRes struct {
//
// If fee for the operation executed using specified method is customized, then StaticClient uses it.
// Otherwise, default fee is used.
-func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) {
- var vubP *uint32
+func (s StaticClient) Invoke(prm InvokePrm) error {
if s.tryNotary {
if s.alpha {
var (
nonce uint32 = 1
+ vubP *uint32
vub uint32
err error
)
if prm.hash != nil {
if prm.controlTX {
- nonce, vub, err = s.client.CalculateNonceAndVUBControl(prm.hash)
+ nonce, vub, err = s.client.CalculateNonceAndVUBControl(*prm.hash)
} else {
- nonce, vub, err = s.client.CalculateNonceAndVUB(prm.hash)
+ nonce, vub, err = s.client.CalculateNonceAndVUB(*prm.hash)
}
if err != nil {
- return InvokeRes{}, fmt.Errorf("calculate nonce and VUB for notary alphabet invoke: %w", err)
+ return fmt.Errorf("could not calculate nonce and VUB for notary alphabet invoke: %w", err)
}
vubP = &vub
}
- if prm.vub > 0 {
- vubP = &prm.vub
- }
-
- return s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
+ return s.client.NotaryInvoke(s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...)
}
- if prm.vub > 0 {
- vubP = &prm.vub
- }
-
- return s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...)
+ return s.client.NotaryInvokeNotAlpha(s.scScriptHash, s.fee, prm.method, prm.args...)
}
return s.client.Invoke(
- ctx,
s.scScriptHash,
s.fee,
prm.method,
@@ -206,9 +178,7 @@ func (ti *TestInvokePrm) SetArgs(args ...any) {
}
// TestInvoke calls TestInvoke method of Client with static internal script hash.
-func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) {
- _, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method)
- defer span.End()
+func (s StaticClient) TestInvoke(prm TestInvokePrm) ([]stackitem.Item, error) {
return s.client.TestInvoke(
s.scScriptHash,
prm.method,
diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go
index f7b6705a8..ff8e507fe 100644
--- a/pkg/morph/client/util.go
+++ b/pkg/morph/client/util.go
@@ -53,7 +53,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) {
case stackitem.IntegerT:
n, err := param.TryInteger()
if err != nil {
- return nil, fmt.Errorf("parse integer bytes: %w", err)
+ return nil, fmt.Errorf("can't parse integer bytes: %w", err)
}
return n.Bytes(), nil
@@ -69,7 +69,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) {
// ArrayFromStackItem returns the slice contract parameters from passed parameter.
//
-// If passed parameter carries boolean false value, returns (nil, nil).
+// If passed parameter carries boolean false value, (nil, nil) is returned.
func ArrayFromStackItem(param stackitem.Item) ([]stackitem.Item, error) {
switch param.Type() {
case stackitem.AnyT:
@@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) {
func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error {
return func(r *result.Invoke, t *transaction.Transaction) error {
if r.State != HaltState {
- return ¬HaltStateError{state: r.State, exception: r.FaultException}
+ return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException})
}
t.SystemFee += add
diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go
deleted file mode 100644
index 87fcf84b8..000000000
--- a/pkg/morph/client/waiter.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client
-
-import (
- "context"
- "fmt"
-
- "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
- "github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter"
- "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
-)
-
-type waiterClient struct {
- c *Client
-}
-
-func (w *waiterClient) Context() context.Context {
- return context.Background()
-}
-
-func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
- return w.c.GetApplicationLog(hash, trig)
-}
-
-func (w *waiterClient) GetBlockCount() (uint32, error) {
- return w.c.BlockCount()
-}
-
-func (w *waiterClient) GetVersion() (*result.Version, error) {
- return w.c.GetVersion()
-}
-
-// WaitTxHalt waits until transaction with the specified hash persists on the blockchain.
-// It also checks execution result to finish in HALT state.
-func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error {
- w, err := waiter.NewPollingBased(&waiterClient{c: c})
- if err != nil {
- return fmt.Errorf("create tx waiter: %w", err)
- }
-
- res, err := w.WaitAny(ctx, vub, h)
- if err != nil {
- return fmt.Errorf("wait until tx persists: %w", err)
- }
-
- if res.VMState.HasFlag(vmstate.Halt) {
- return nil
- }
- return ¬HaltStateError{state: res.VMState.String(), exception: res.FaultException}
-}
diff --git a/pkg/morph/event/balance/lock.go b/pkg/morph/event/balance/lock.go
index 99f80584a..062a2a886 100644
--- a/pkg/morph/event/balance/lock.go
+++ b/pkg/morph/event/balance/lock.go
@@ -3,7 +3,7 @@ package balance
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/balance"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -47,17 +47,61 @@ func (l Lock) TxHash() util.Uint256 { return l.txHash }
// ParseLock from notification into lock structure.
func ParseLock(e *state.ContainedNotificationEvent) (event.Event, error) {
- var le balance.LockEvent
- if err := le.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse balance.LockEvent: %w", err)
+ var (
+ ev Lock
+ err error
+ )
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Lock{
- id: le.TxID,
- user: le.From,
- lock: le.To,
- amount: le.Amount.Int64(),
- until: le.Until.Int64(),
- txHash: e.Container,
- }, nil
+ if ln := len(params); ln != 5 {
+ return nil, event.WrongNumberOfParameters(5, ln)
+ }
+
+ // parse id
+ ev.id, err = client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock id: %w", err)
+ }
+
+ // parse user
+ user, err := client.BytesFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock user value: %w", err)
+ }
+
+ ev.user, err = util.Uint160DecodeBytesBE(user)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert lock user value to uint160: %w", err)
+ }
+
+ // parse lock account
+ lock, err := client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock account value: %w", err)
+ }
+
+ ev.lock, err = util.Uint160DecodeBytesBE(lock)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert lock account value to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.amount, err = client.IntFromStackItem(params[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock amount: %w", err)
+ }
+
+ // parse until deadline
+ ev.until, err = client.IntFromStackItem(params[4])
+ if err != nil {
+ return nil, fmt.Errorf("could not get lock deadline: %w", err)
+ }
+
+ ev.txHash = e.Container
+
+ return ev, nil
}
diff --git a/pkg/morph/event/balance/lock_test.go b/pkg/morph/event/balance/lock_test.go
index 87b91aede..9199bcd55 100644
--- a/pkg/morph/event/balance/lock_test.go
+++ b/pkg/morph/event/balance/lock_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
@@ -27,7 +28,7 @@ func TestParseLock(t *testing.T) {
}
_, err := ParseLock(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(5, len(prms)).Error())
})
t.Run("wrong id parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go
index d28f6d521..a206307f8 100644
--- a/pkg/morph/event/container/delete.go
+++ b/pkg/morph/event/container/delete.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -58,14 +58,28 @@ func (DeleteSuccess) MorphEvent() {}
// ParseDeleteSuccess decodes notification event thrown by Container contract into
// DeleteSuccess and returns it as event.Event.
func ParseDeleteSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- var dse container.DeleteSuccessEvent
- if err := dse.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse container.DeleteSuccessEvent: %w", err)
+ items, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
}
- var cnr cid.ID
- cnr.SetSHA256(dse.ContainerID)
- return DeleteSuccess{
- ID: cnr,
- }, nil
+ const expectedItemNumDeleteSuccess = 1
+
+ if ln := len(items); ln != expectedItemNumDeleteSuccess {
+ return nil, event.WrongNumberOfParameters(expectedItemNumDeleteSuccess, ln)
+ }
+
+ binID, err := client.BytesFromStackItem(items[0])
+ if err != nil {
+ return nil, fmt.Errorf("parse container ID item: %w", err)
+ }
+
+ var res DeleteSuccess
+
+ err = res.ID.Decode(binID)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
+ }
+
+ return res, nil
}
diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go
index 62e7d7277..627c5fcf5 100644
--- a/pkg/morph/event/container/delete_test.go
+++ b/pkg/morph/event/container/delete_test.go
@@ -4,6 +4,7 @@ import (
"crypto/sha256"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -17,7 +18,7 @@ func TestParseDeleteSuccess(t *testing.T) {
}
_, err := ParseDeleteSuccess(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
})
t.Run("wrong container parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/container/eacl.go b/pkg/morph/event/container/eacl.go
new file mode 100644
index 000000000..4168d8842
--- /dev/null
+++ b/pkg/morph/event/container/eacl.go
@@ -0,0 +1,51 @@
+package container
+
+import (
+ "github.com/nspcc-dev/neo-go/pkg/network/payload"
+)
+
+// SetEACL represents structure of notification about
+// modified eACL table coming from FrostFS Container contract.
+type SetEACL struct {
+ TableValue []byte
+ SignatureValue []byte
+ PublicKeyValue []byte
+ TokenValue []byte
+
+ // For notary notifications only.
+ // Contains raw transactions of notary request.
+ NotaryRequestValue *payload.P2PNotaryRequest
+}
+
+// MorphEvent implements Neo:Morph Event interface.
+func (SetEACL) MorphEvent() {}
+
+// Table returns eACL table in a binary FrostFS API format.
+func (x SetEACL) Table() []byte {
+ return x.TableValue
+}
+
+// Signature returns signature of the binary table.
+func (x SetEACL) Signature() []byte {
+ return x.SignatureValue
+}
+
+// PublicKey returns public keys of container
+// owner in a binary format.
+func (x SetEACL) PublicKey() []byte {
+ return x.PublicKeyValue
+}
+
+// SessionToken returns binary token of the session
+// within which the eACL was set.
+func (x SetEACL) SessionToken() []byte {
+ return x.TokenValue
+}
+
+// NotaryRequest returns raw notary request if notification
+// was received via notary service. Otherwise, returns nil.
+func (x SetEACL) NotaryRequest() *payload.P2PNotaryRequest {
+ return x.NotaryRequestValue
+}
+
+const expectedItemNumEACL = 4
diff --git a/pkg/morph/event/container/eacl_notary.go b/pkg/morph/event/container/eacl_notary.go
new file mode 100644
index 000000000..a4fe7c966
--- /dev/null
+++ b/pkg/morph/event/container/eacl_notary.go
@@ -0,0 +1,75 @@
+package container
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+)
+
+func (x *SetEACL) setTable(v []byte) {
+ if v != nil {
+ x.TableValue = v
+ }
+}
+
+func (x *SetEACL) setSignature(v []byte) {
+ if v != nil {
+ x.SignatureValue = v
+ }
+}
+
+func (x *SetEACL) setPublicKey(v []byte) {
+ if v != nil {
+ x.PublicKeyValue = v
+ }
+}
+
+func (x *SetEACL) setToken(v []byte) {
+ if v != nil {
+ x.TokenValue = v
+ }
+}
+
+var setEACLFieldSetters = []func(*SetEACL, []byte){
+ // order on stack is reversed
+ (*SetEACL).setToken,
+ (*SetEACL).setPublicKey,
+ (*SetEACL).setSignature,
+ (*SetEACL).setTable,
+}
+
+const (
+ // SetEACLNotaryEvent is method name for container EACL operations
+ // in `Container` contract. Is used as identifier for notary
+ // EACL changing requests.
+ SetEACLNotaryEvent = "setEACL"
+)
+
+// ParseSetEACLNotary from NotaryEvent into container event structure.
+func ParseSetEACLNotary(ne event.NotaryEvent) (event.Event, error) {
+ var (
+ ev SetEACL
+ currentOp opcode.Opcode
+ )
+
+ fieldNum := 0
+
+ for _, op := range ne.Params() {
+ currentOp = op.Code()
+
+ switch {
+ case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4:
+ if fieldNum == expectedItemNumEACL {
+ return nil, event.UnexpectedArgNumErr(SetEACLNotaryEvent)
+ }
+
+ setEACLFieldSetters[fieldNum](&ev, op.Param())
+ fieldNum++
+ default:
+ return nil, event.UnexpectedOpcode(SetEACLNotaryEvent, op.Code())
+ }
+ }
+
+ ev.NotaryRequestValue = ne.Raw()
+
+ return ev, nil
+}
diff --git a/pkg/morph/event/container/util_test.go b/pkg/morph/event/container/eacl_test.go
similarity index 100%
rename from pkg/morph/event/container/util_test.go
rename to pkg/morph/event/container/eacl_test.go
diff --git a/pkg/morph/event/container/estimates.go b/pkg/morph/event/container/estimates.go
new file mode 100644
index 000000000..9fd21e2b5
--- /dev/null
+++ b/pkg/morph/event/container/estimates.go
@@ -0,0 +1,78 @@
+package container
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+)
+
+// StartEstimation structure of container.StartEstimation notification from
+// morph chain.
+type StartEstimation struct {
+ epoch uint64
+}
+
+// StopEstimation structure of container.StopEstimation notification from
+// morph chain.
+type StopEstimation struct {
+ epoch uint64
+}
+
+// MorphEvent implements Neo:Morph Event interface.
+func (StartEstimation) MorphEvent() {}
+
+// MorphEvent implements Neo:Morph Event interface.
+func (StopEstimation) MorphEvent() {}
+
+// Epoch returns epoch value for which to start container size estimation.
+func (s StartEstimation) Epoch() uint64 { return s.epoch }
+
+// Epoch returns epoch value for which to stop container size estimation.
+func (s StopEstimation) Epoch() uint64 { return s.epoch }
+
+// ParseStartEstimation from notification into container event structure.
+func ParseStartEstimation(e *state.ContainedNotificationEvent) (event.Event, error) {
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ }
+
+ epoch, err := parseEstimation(params)
+ if err != nil {
+ return nil, err
+ }
+
+ return StartEstimation{epoch: epoch}, nil
+}
+
+// ParseStopEstimation from notification into container event structure.
+func ParseStopEstimation(e *state.ContainedNotificationEvent) (event.Event, error) {
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ }
+
+ epoch, err := parseEstimation(params)
+ if err != nil {
+ return nil, err
+ }
+
+ return StopEstimation{epoch: epoch}, nil
+}
+
+func parseEstimation(params []stackitem.Item) (uint64, error) {
+ if ln := len(params); ln != 1 {
+ return 0, event.WrongNumberOfParameters(1, ln)
+ }
+
+ // parse container
+ epoch, err := client.IntFromStackItem(params[0])
+ if err != nil {
+ return 0, fmt.Errorf("could not get estimation epoch: %w", err)
+ }
+
+ return uint64(epoch), nil
+}
diff --git a/pkg/morph/event/container/estimates_test.go b/pkg/morph/event/container/estimates_test.go
new file mode 100644
index 000000000..be46e62c4
--- /dev/null
+++ b/pkg/morph/event/container/estimates_test.go
@@ -0,0 +1,80 @@
+package container
+
+import (
+ "math/big"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/stretchr/testify/require"
+)
+
+func TestStartEstimation(t *testing.T) {
+ var epochNum uint64 = 100
+ epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum))
+
+ t.Run("wrong number of parameters", func(t *testing.T) {
+ prms := []stackitem.Item{
+ stackitem.NewMap(),
+ stackitem.NewMap(),
+ }
+
+ _, err := ParseStartEstimation(createNotifyEventFromItems(prms))
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ })
+
+ t.Run("wrong estimation parameter", func(t *testing.T) {
+ _, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewMap(),
+ }))
+
+ require.Error(t, err)
+ })
+
+ t.Run("correct behavior", func(t *testing.T) {
+ ev, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{
+ epochItem,
+ }))
+
+ require.NoError(t, err)
+
+ require.Equal(t, StartEstimation{
+ epochNum,
+ }, ev)
+ })
+}
+
+func TestStopEstimation(t *testing.T) {
+ var epochNum uint64 = 100
+ epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum))
+
+ t.Run("wrong number of parameters", func(t *testing.T) {
+ prms := []stackitem.Item{
+ stackitem.NewMap(),
+ stackitem.NewMap(),
+ }
+
+ _, err := ParseStopEstimation(createNotifyEventFromItems(prms))
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ })
+
+ t.Run("wrong estimation parameter", func(t *testing.T) {
+ _, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewMap(),
+ }))
+
+ require.Error(t, err)
+ })
+
+ t.Run("correct behavior", func(t *testing.T) {
+ ev, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{
+ epochItem,
+ }))
+
+ require.NoError(t, err)
+
+ require.Equal(t, StopEstimation{
+ epochNum,
+ }, ev)
+ })
+}
diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go
index b09394ba4..335034bf3 100644
--- a/pkg/morph/event/container/put.go
+++ b/pkg/morph/event/container/put.go
@@ -3,7 +3,7 @@ package container
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -78,14 +78,33 @@ func (PutSuccess) MorphEvent() {}
// ParsePutSuccess decodes notification event thrown by Container contract into
// PutSuccess and returns it as event.Event.
func ParsePutSuccess(e *state.ContainedNotificationEvent) (event.Event, error) {
- var pse container.PutSuccessEvent
- if err := pse.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse container.PutSuccessEvent: %w", err)
+ items, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("parse stack array from raw notification event: %w", err)
}
- var cnr cid.ID
- cnr.SetSHA256(pse.ContainerID)
- return PutSuccess{
- ID: cnr,
- }, nil
+ const expectedItemNumPutSuccess = 2
+
+ if ln := len(items); ln != expectedItemNumPutSuccess {
+ return nil, event.WrongNumberOfParameters(expectedItemNumPutSuccess, ln)
+ }
+
+ binID, err := client.BytesFromStackItem(items[0])
+ if err != nil {
+ return nil, fmt.Errorf("parse container ID item: %w", err)
+ }
+
+ _, err = client.BytesFromStackItem(items[1])
+ if err != nil {
+ return nil, fmt.Errorf("parse public key item: %w", err)
+ }
+
+ var res PutSuccess
+
+ err = res.ID.Decode(binID)
+ if err != nil {
+ return nil, fmt.Errorf("decode container ID: %w", err)
+ }
+
+ return res, nil
}
diff --git a/pkg/morph/event/container/put_notary.go b/pkg/morph/event/container/put_notary.go
index 6b2ee7b0a..f5779ced6 100644
--- a/pkg/morph/event/container/put_notary.go
+++ b/pkg/morph/event/container/put_notary.go
@@ -46,7 +46,7 @@ const (
// put container requests.
PutNotaryEvent = "put"
- // PutNamedNotaryEvent is an ID of notary "put named container" notification.
+ // PutNamedNotaryEvent is an ID of notary "put named container" notification.
PutNamedNotaryEvent = "putNamed"
)
diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go
index dd5c7ea93..3622f9943 100644
--- a/pkg/morph/event/container/put_test.go
+++ b/pkg/morph/event/container/put_test.go
@@ -4,8 +4,8 @@ import (
"crypto/sha256"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -17,7 +17,7 @@ func TestParsePutSuccess(t *testing.T) {
}
_, err := ParsePutSuccess(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
})
t.Run("wrong container ID parameter", func(t *testing.T) {
@@ -35,30 +35,18 @@ func TestParsePutSuccess(t *testing.T) {
id.Encode(binID)
t.Run("wrong public key parameter", func(t *testing.T) {
- t.Run("wrong type", func(t *testing.T) {
- _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binID),
- stackitem.NewMap(),
- }))
+ _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(binID),
+ stackitem.NewMap(),
+ }))
- require.Error(t, err)
- })
- t.Run("garbage data", func(t *testing.T) {
- _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewByteArray(binID),
- stackitem.NewByteArray([]byte("key")),
- }))
- require.Error(t, err)
- })
+ require.Error(t, err)
})
t.Run("correct behavior", func(t *testing.T) {
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
ev, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(binID),
- stackitem.NewByteArray(pk.PublicKey().Bytes()),
+ stackitem.NewByteArray([]byte("key")),
}))
require.NoError(t, err)
diff --git a/pkg/morph/event/frostfs/bind.go b/pkg/morph/event/frostfs/bind.go
new file mode 100644
index 000000000..8655b1222
--- /dev/null
+++ b/pkg/morph/event/frostfs/bind.go
@@ -0,0 +1,92 @@
+package frostfs
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/util"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+)
+
+type Bind struct {
+ BindCommon
+}
+
+type BindCommon struct {
+ UserValue []byte
+ KeysValue [][]byte
+
+ // TxHashValue is used in notary environment
+ // for calculating unique but same for
+ // all notification receivers values.
+ TxHashValue util.Uint256
+}
+
+// TxHash returns hash of the TX with new epoch
+// notification.
+func (b BindCommon) TxHash() util.Uint256 {
+ return b.TxHashValue
+}
+
+// MorphEvent implements Neo:Morph Event interface.
+func (BindCommon) MorphEvent() {}
+
+func (b BindCommon) Keys() [][]byte { return b.KeysValue }
+
+func (b BindCommon) User() []byte { return b.UserValue }
+
+func ParseBind(e *state.ContainedNotificationEvent) (event.Event, error) {
+ var (
+ ev Bind
+ err error
+ )
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ }
+
+ err = parseBind(&ev.BindCommon, params)
+ if err != nil {
+ return nil, err
+ }
+
+ ev.TxHashValue = e.Container
+
+ return ev, nil
+}
+
+func parseBind(dst *BindCommon, params []stackitem.Item) error {
+ if ln := len(params); ln != 2 {
+ return event.WrongNumberOfParameters(2, ln)
+ }
+
+ var err error
+
+ // parse user
+ dst.UserValue, err = client.BytesFromStackItem(params[0])
+ if err != nil {
+ return fmt.Errorf("could not get bind user: %w", err)
+ }
+
+ // parse keys
+ bindKeys, err := client.ArrayFromStackItem(params[1])
+ if err != nil {
+ return fmt.Errorf("could not get bind keys: %w", err)
+ }
+
+ dst.KeysValue = make([][]byte, 0, len(bindKeys))
+
+ for i := range bindKeys {
+ rawKey, err := client.BytesFromStackItem(bindKeys[i])
+ if err != nil {
+ return fmt.Errorf("could not get bind public key: %w", err)
+ }
+
+ dst.KeysValue = append(dst.KeysValue, rawKey)
+ }
+
+ return nil
+}
diff --git a/pkg/morph/event/frostfs/bind_test.go b/pkg/morph/event/frostfs/bind_test.go
new file mode 100644
index 000000000..38a82b8d4
--- /dev/null
+++ b/pkg/morph/event/frostfs/bind_test.go
@@ -0,0 +1,72 @@
+package frostfs
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseBind(t *testing.T) {
+ var (
+ user = []byte{0x1, 0x2, 0x3}
+ publicKeys = [][]byte{
+ []byte("key1"),
+ []byte("key2"),
+ []byte("key3"),
+ }
+ )
+
+ t.Run("wrong number of parameters", func(t *testing.T) {
+ prms := []stackitem.Item{
+ stackitem.NewMap(),
+ }
+
+ _, err := ParseBind(createNotifyEventFromItems(prms))
+ require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
+ })
+
+ t.Run("wrong first parameter", func(t *testing.T) {
+ _, err := ParseBind(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewMap(),
+ }))
+
+ require.Error(t, err)
+ })
+
+ t.Run("wrong second parameter", func(t *testing.T) {
+ _, err := ParseBind(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(user),
+ stackitem.NewMap(),
+ }))
+
+ require.Error(t, err)
+ })
+
+ t.Run("correct", func(t *testing.T) {
+ ev, err := ParseBind(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(user),
+ stackitem.NewArray([]stackitem.Item{
+ stackitem.NewByteArray(publicKeys[0]),
+ stackitem.NewByteArray(publicKeys[1]),
+ stackitem.NewByteArray(publicKeys[2]),
+ }),
+ }))
+ require.NoError(t, err)
+
+ e := ev.(Bind)
+
+ require.Equal(t, user, e.User())
+ require.Equal(t, publicKeys, e.Keys())
+ })
+}
+
+func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
+ return &state.ContainedNotificationEvent{
+ NotificationEvent: state.NotificationEvent{
+ Item: stackitem.NewArray(items),
+ },
+ }
+}
diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go
index cf56464b8..eae2a23f5 100644
--- a/pkg/morph/event/frostfs/cheque.go
+++ b/pkg/morph/event/frostfs/cheque.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -34,20 +34,53 @@ func (c Cheque) LockAccount() util.Uint160 { return c.LockValue }
// ParseCheque from notification into cheque structure.
func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) {
- var ce frostfs.ChequeEvent
- if err := ce.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.ChequeEvent: %w", err)
- }
+ var (
+ ev Cheque
+ err error
+ )
- lock, err := util.Uint160DecodeBytesBE(ce.LockAccount)
+ params, err := event.ParseStackArray(e)
if err != nil {
- return nil, fmt.Errorf("parse frostfs.ChequeEvent: field LockAccount: %w", err)
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Cheque{
- IDValue: ce.Id,
- AmountValue: ce.Amount.Int64(),
- UserValue: ce.User,
- LockValue: lock,
- }, nil
+ if ln := len(params); ln != 4 {
+ return nil, event.WrongNumberOfParameters(4, ln)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque id: %w", err)
+ }
+
+ // parse user
+ user, err := client.BytesFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque user: %w", err)
+ }
+
+ ev.UserValue, err = util.Uint160DecodeBytesBE(user)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert cheque user to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.AmountValue, err = client.IntFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque amount: %w", err)
+ }
+
+ // parse lock account
+ lock, err := client.BytesFromStackItem(params[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get cheque lock account: %w", err)
+ }
+
+ ev.LockValue, err = util.Uint160DecodeBytesBE(lock)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert cheque lock account to uint160: %w", err)
+ }
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go
index d92b7922b..be53592ca 100644
--- a/pkg/morph/event/frostfs/cheque_test.go
+++ b/pkg/morph/event/frostfs/cheque_test.go
@@ -4,7 +4,7 @@ import (
"math/big"
"testing"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -26,7 +26,7 @@ func TestParseCheque(t *testing.T) {
}
_, err := ParseCheque(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
})
t.Run("wrong id parameter", func(t *testing.T) {
@@ -84,11 +84,3 @@ func TestParseCheque(t *testing.T) {
}, ev)
})
}
-
-func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
- return &state.ContainedNotificationEvent{
- NotificationEvent: state.NotificationEvent{
- Item: stackitem.NewArray(items),
- },
- }
-}
diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go
index 805e80f3c..4c87634c2 100644
--- a/pkg/morph/event/frostfs/config.go
+++ b/pkg/morph/event/frostfs/config.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -36,15 +36,39 @@ func (u Config) Key() []byte { return u.KeyValue }
func (u Config) Value() []byte { return u.ValueValue }
func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) {
- var sce frostfs.SetConfigEvent
- if err := sce.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.SetConfigEvent: %w", err)
+ var (
+ ev Config
+ err error
+ )
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Config{
- KeyValue: sce.Key,
- ValueValue: sce.Value,
- IDValue: sce.Id,
- TxHashValue: e.Container,
- }, nil
+ if ln := len(params); ln != 3 {
+ return nil, event.WrongNumberOfParameters(3, ln)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get config update id: %w", err)
+ }
+
+ // parse key
+ ev.KeyValue, err = client.BytesFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get config key: %w", err)
+ }
+
+ // parse value
+ ev.ValueValue, err = client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get config value: %w", err)
+ }
+
+ ev.TxHashValue = e.Container
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go
index 8acc8c15c..dcd4201e4 100644
--- a/pkg/morph/event/frostfs/config_test.go
+++ b/pkg/morph/event/frostfs/config_test.go
@@ -3,6 +3,7 @@ package frostfs
import (
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -20,7 +21,7 @@ func TestParseConfig(t *testing.T) {
}
_, err := ParseConfig(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
})
t.Run("wrong first parameter", func(t *testing.T) {
diff --git a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go
index fcb01577e..d8a3b82f0 100644
--- a/pkg/morph/event/frostfs/deposit.go
+++ b/pkg/morph/event/frostfs/deposit.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -34,15 +34,50 @@ func (d Deposit) Amount() int64 { return d.AmountValue }
// ParseDeposit notification into deposit structure.
func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) {
- var de frostfs.DepositEvent
- if err := de.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.DepositEvent: %w", err)
+ var ev Deposit
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Deposit{
- IDValue: de.TxHash[:],
- AmountValue: de.Amount.Int64(),
- FromValue: de.From,
- ToValue: de.Receiver,
- }, nil
+ if ln := len(params); ln != 4 {
+ return nil, event.WrongNumberOfParameters(4, ln)
+ }
+
+ // parse from
+ from, err := client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit sender: %w", err)
+ }
+
+ ev.FromValue, err = util.Uint160DecodeBytesBE(from)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert deposit sender to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.AmountValue, err = client.IntFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit amount: %w", err)
+ }
+
+ // parse to
+ to, err := client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit receiver: %w", err)
+ }
+
+ ev.ToValue, err = util.Uint160DecodeBytesBE(to)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert deposit receiver to uint160: %w", err)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[3])
+ if err != nil {
+ return nil, fmt.Errorf("could not get deposit id: %w", err)
+ }
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go
index 38d3e61f6..f279a7f9c 100644
--- a/pkg/morph/event/frostfs/deposit_test.go
+++ b/pkg/morph/event/frostfs/deposit_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -11,7 +12,7 @@ import (
func TestParseDeposit(t *testing.T) {
var (
- id = util.Uint256{0, 1, 2, 3}
+ id = []byte("Hello World")
from = util.Uint160{0x1, 0x2, 0x3}
to = util.Uint160{0x3, 0x2, 0x1}
@@ -25,7 +26,7 @@ func TestParseDeposit(t *testing.T) {
}
_, err := ParseDeposit(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error())
})
t.Run("wrong from parameter", func(t *testing.T) {
@@ -71,12 +72,12 @@ func TestParseDeposit(t *testing.T) {
stackitem.NewByteArray(from.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
stackitem.NewByteArray(to.BytesBE()),
- stackitem.NewByteArray(id[:]),
+ stackitem.NewByteArray(id),
}))
require.NoError(t, err)
require.Equal(t, Deposit{
- IDValue: id[:],
+ IDValue: id,
AmountValue: amount,
FromValue: from,
ToValue: to,
diff --git a/pkg/morph/event/frostfs/ir_update.go b/pkg/morph/event/frostfs/ir_update.go
new file mode 100644
index 000000000..62203540f
--- /dev/null
+++ b/pkg/morph/event/frostfs/ir_update.go
@@ -0,0 +1,54 @@
+package frostfs
+
+import (
+ "crypto/elliptic"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+)
+
+type UpdateInnerRing struct {
+ keys []*keys.PublicKey
+}
+
+// MorphEvent implements Neo:Morph Event interface.
+func (UpdateInnerRing) MorphEvent() {}
+
+func (u UpdateInnerRing) Keys() []*keys.PublicKey { return u.keys }
+
+func ParseUpdateInnerRing(params []stackitem.Item) (event.Event, error) {
+ var (
+ ev UpdateInnerRing
+ err error
+ )
+
+ if ln := len(params); ln != 1 {
+ return nil, event.WrongNumberOfParameters(1, ln)
+ }
+
+ // parse keys
+ irKeys, err := client.ArrayFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get updated inner ring keys: %w", err)
+ }
+
+ ev.keys = make([]*keys.PublicKey, 0, len(irKeys))
+ for i := range irKeys {
+ rawKey, err := client.BytesFromStackItem(irKeys[i])
+ if err != nil {
+ return nil, fmt.Errorf("could not get updated inner ring public key: %w", err)
+ }
+
+ key, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("could not parse updated inner ring public key: %w", err)
+ }
+
+ ev.keys = append(ev.keys, key)
+ }
+
+ return ev, nil
+}
diff --git a/pkg/morph/event/frostfs/ir_update_test.go b/pkg/morph/event/frostfs/ir_update_test.go
new file mode 100644
index 000000000..8ce6fdc36
--- /dev/null
+++ b/pkg/morph/event/frostfs/ir_update_test.go
@@ -0,0 +1,59 @@
+package frostfs
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/stretchr/testify/require"
+)
+
+func genKey(t *testing.T) *keys.PrivateKey {
+ priv, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ return priv
+}
+
+func TestParseUpdateInnerRing(t *testing.T) {
+ var (
+ publicKeys = []*keys.PublicKey{
+ genKey(t).PublicKey(),
+ genKey(t).PublicKey(),
+ genKey(t).PublicKey(),
+ }
+ )
+
+ t.Run("wrong number of parameters", func(t *testing.T) {
+ prms := []stackitem.Item{
+ stackitem.NewMap(),
+ stackitem.NewMap(),
+ }
+
+ _, err := ParseUpdateInnerRing(prms)
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
+ })
+
+ t.Run("wrong first parameter", func(t *testing.T) {
+ _, err := ParseUpdateInnerRing([]stackitem.Item{
+ stackitem.NewMap(),
+ })
+
+ require.Error(t, err)
+ })
+
+ t.Run("correct", func(t *testing.T) {
+ ev, err := ParseUpdateInnerRing([]stackitem.Item{
+ stackitem.NewArray([]stackitem.Item{
+ stackitem.NewByteArray(publicKeys[0].Bytes()),
+ stackitem.NewByteArray(publicKeys[1].Bytes()),
+ stackitem.NewByteArray(publicKeys[2].Bytes()),
+ }),
+ })
+ require.NoError(t, err)
+
+ require.Equal(t, UpdateInnerRing{
+ keys: publicKeys,
+ }, ev)
+ })
+}
diff --git a/pkg/morph/event/frostfs/unbind.go b/pkg/morph/event/frostfs/unbind.go
new file mode 100644
index 000000000..5a6a8dad9
--- /dev/null
+++ b/pkg/morph/event/frostfs/unbind.go
@@ -0,0 +1,33 @@
+package frostfs
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/core/state"
+)
+
+type Unbind struct {
+ BindCommon
+}
+
+func ParseUnbind(e *state.ContainedNotificationEvent) (event.Event, error) {
+ var (
+ ev Unbind
+ err error
+ )
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ }
+
+ err = parseBind(&ev.BindCommon, params)
+ if err != nil {
+ return nil, err
+ }
+
+ ev.TxHashValue = e.Container
+
+ return ev, nil
+}
diff --git a/pkg/morph/event/frostfs/unbind_test.go b/pkg/morph/event/frostfs/unbind_test.go
new file mode 100644
index 000000000..4b79d7c48
--- /dev/null
+++ b/pkg/morph/event/frostfs/unbind_test.go
@@ -0,0 +1,63 @@
+package frostfs
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseUnbind(t *testing.T) {
+ var (
+ user = []byte{0x1, 0x2, 0x3}
+ publicKeys = [][]byte{
+ []byte("key1"),
+ []byte("key2"),
+ []byte("key3"),
+ }
+ )
+
+ t.Run("wrong number of parameters", func(t *testing.T) {
+ prms := []stackitem.Item{
+ stackitem.NewMap(),
+ }
+
+ _, err := ParseUnbind(createNotifyEventFromItems(prms))
+ require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error())
+ })
+
+ t.Run("wrong first parameter", func(t *testing.T) {
+ _, err := ParseUnbind(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewMap(),
+ }))
+
+ require.Error(t, err)
+ })
+
+ t.Run("wrong second parameter", func(t *testing.T) {
+ _, err := ParseUnbind(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(user),
+ stackitem.NewMap(),
+ }))
+
+ require.Error(t, err)
+ })
+
+ t.Run("correct", func(t *testing.T) {
+ ev, err := ParseUnbind(createNotifyEventFromItems([]stackitem.Item{
+ stackitem.NewByteArray(user),
+ stackitem.NewArray([]stackitem.Item{
+ stackitem.NewByteArray(publicKeys[0]),
+ stackitem.NewByteArray(publicKeys[1]),
+ stackitem.NewByteArray(publicKeys[2]),
+ }),
+ }))
+ require.NoError(t, err)
+
+ e := ev.(Unbind)
+
+ require.Equal(t, user, e.User())
+ require.Equal(t, publicKeys, e.Keys())
+ })
+}
diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go
index 2568b6512..f48067f86 100644
--- a/pkg/morph/event/frostfs/withdraw.go
+++ b/pkg/morph/event/frostfs/withdraw.go
@@ -3,7 +3,7 @@ package frostfs
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -30,14 +30,39 @@ func (w Withdraw) Amount() int64 { return w.AmountValue }
// ParseWithdraw notification into withdraw structure.
func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) {
- var we frostfs.WithdrawEvent
- if err := we.FromStackItem(e.Item); err != nil {
- return nil, fmt.Errorf("parse frostfs.WithdrawEvent: %w", err)
+ var ev Withdraw
+
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
- return Withdraw{
- IDValue: we.TxHash[:],
- AmountValue: we.Amount.Int64(),
- UserValue: we.User,
- }, nil
+ if ln := len(params); ln != 3 {
+ return nil, event.WrongNumberOfParameters(3, ln)
+ }
+
+ // parse user
+ user, err := client.BytesFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get withdraw user: %w", err)
+ }
+
+ ev.UserValue, err = util.Uint160DecodeBytesBE(user)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert withdraw user to uint160: %w", err)
+ }
+
+ // parse amount
+ ev.AmountValue, err = client.IntFromStackItem(params[1])
+ if err != nil {
+ return nil, fmt.Errorf("could not get withdraw amount: %w", err)
+ }
+
+ // parse id
+ ev.IDValue, err = client.BytesFromStackItem(params[2])
+ if err != nil {
+ return nil, fmt.Errorf("could not get withdraw id: %w", err)
+ }
+
+ return ev, nil
}
diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go
index e382305e6..33435d19a 100644
--- a/pkg/morph/event/frostfs/withdraw_test.go
+++ b/pkg/morph/event/frostfs/withdraw_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
@@ -11,7 +12,7 @@ import (
func TestParseWithdraw(t *testing.T) {
var (
- id = util.Uint256{1, 2, 3}
+ id = []byte("Hello World")
user = util.Uint160{0x1, 0x2, 0x3}
amount int64 = 10
@@ -24,7 +25,7 @@ func TestParseWithdraw(t *testing.T) {
}
_, err := ParseWithdraw(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
})
t.Run("wrong user parameter", func(t *testing.T) {
@@ -58,12 +59,12 @@ func TestParseWithdraw(t *testing.T) {
ev, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{
stackitem.NewByteArray(user.BytesBE()),
stackitem.NewBigInteger(new(big.Int).SetInt64(amount)),
- stackitem.NewByteArray(id[:]),
+ stackitem.NewByteArray(id),
}))
require.NoError(t, err)
require.Equal(t, Withdraw{
- IDValue: id[:],
+ IDValue: id,
AmountValue: amount,
UserValue: user,
}, ev)
diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go
index 55a514ff1..182b4667e 100644
--- a/pkg/morph/event/handlers.go
+++ b/pkg/morph/event/handlers.go
@@ -1,26 +1,32 @@
package event
import (
- "context"
-
"github.com/nspcc-dev/neo-go/pkg/core/block"
- "github.com/nspcc-dev/neo-go/pkg/util"
)
// Handler is an Event processing function.
-type Handler func(context.Context, Event)
+type Handler func(Event)
// BlockHandler is a chain block processing function.
-type BlockHandler func(context.Context, *block.Block)
+type BlockHandler func(*block.Block)
// NotificationHandlerInfo is a structure that groups
// the parameters of the handler of particular
// contract event.
type NotificationHandlerInfo struct {
- Contract util.Uint160
- Type Type
- Parser NotificationParser
- Handlers []Handler
+ scriptHashWithType
+
+ h Handler
+}
+
+// SetHandler is an event handler setter.
+func (s *NotificationHandlerInfo) SetHandler(v Handler) {
+ s.h = v
+}
+
+// Handler returns an event handler.
+func (s NotificationHandlerInfo) Handler() Handler {
+ return s.h
}
// NotaryHandlerInfo is a structure that groups
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index e5cdfeef7..ca5031415 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -33,6 +33,13 @@ type Listener interface {
// it could not be started.
ListenWithError(context.Context, chan<- error)
+ // SetNotificationParser must set the parser of particular contract event.
+ //
+ // Parser of each event must be set once. All parsers must be set before Listen call.
+ //
+ // Must ignore nil parsers and all calls after listener has been started.
+ SetNotificationParser(NotificationParserInfo)
+
// RegisterNotificationHandler must register the event handler for particular notification event of contract.
//
// The specified handler must be called after each capture and parsing of the event.
@@ -89,10 +96,10 @@ type ListenerParams struct {
type listener struct {
mtx sync.RWMutex
- wg sync.WaitGroup
-
startOnce, stopOnce sync.Once
+ started bool
+
notificationParsers map[scriptHashWithType]NotificationParser
notificationHandlers map[scriptHashWithType][]Handler
@@ -111,18 +118,12 @@ type listener struct {
pool *ants.Pool
}
-const newListenerFailMsg = "instantiate Listener"
+const newListenerFailMsg = "could not instantiate Listener"
var (
errNilLogger = errors.New("nil logger")
errNilSubscriber = errors.New("nil event subscriber")
-
- errNotificationSubscrConnectionTerminated = errors.New("event subscriber connection has been terminated")
-
- errNotarySubscrConnectionTerminated = errors.New("notary event subscriber connection has been terminated")
-
- errBlockNotificationChannelClosed = errors.New("new block notification channel is closed")
)
// Listen starts the listening for events with registered handlers.
@@ -132,10 +133,11 @@ var (
// Returns an error if listener was already started.
func (l *listener) Listen(ctx context.Context) {
l.startOnce.Do(func() {
- l.wg.Add(1)
- defer l.wg.Done()
-
- l.listen(ctx, nil)
+ if err := l.listen(ctx, nil); err != nil {
+ l.log.Error(logs.EventCouldNotStartListenToEvents,
+ zap.String("error", err.Error()),
+ )
+ }
})
}
@@ -147,31 +149,36 @@ func (l *listener) Listen(ctx context.Context) {
// Returns an error if listener was already started.
func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
l.startOnce.Do(func() {
- l.wg.Add(1)
- defer l.wg.Done()
-
- l.listen(ctx, intError)
+ if err := l.listen(ctx, intError); err != nil {
+ l.log.Error(logs.EventCouldNotStartListenToEvents,
+ zap.String("error", err.Error()),
+ )
+ intError <- err
+ }
})
}
-func (l *listener) listen(ctx context.Context, intError chan<- error) {
+func (l *listener) listen(ctx context.Context, intError chan<- error) error {
+ // mark listener as started
+ l.started = true
+
subErrCh := make(chan error)
go l.subscribe(subErrCh)
l.listenLoop(ctx, intError, subErrCh)
+
+ return nil
}
func (l *listener) subscribe(errCh chan error) {
- l.wg.Add(1)
- defer l.wg.Done()
// create the list of listening contract hashes
hashes := make([]util.Uint160, 0)
// fill the list with the contracts with set event parsers.
l.mtx.RLock()
for hashType := range l.notificationParsers {
- scHash := hashType.Hash
+ scHash := hashType.ScriptHash()
// prevent repetitions
for _, hash := range hashes {
@@ -180,48 +187,31 @@ func (l *listener) subscribe(errCh chan error) {
}
}
- hashes = append(hashes, hashType.Hash)
+ hashes = append(hashes, hashType.ScriptHash())
}
l.mtx.RUnlock()
err := l.subscriber.SubscribeForNotification(hashes...)
if err != nil {
- errCh <- fmt.Errorf("subscribe for notifications: %w", err)
+ errCh <- fmt.Errorf("could not subscribe for notifications: %w", err)
return
}
if len(l.blockHandlers) > 0 {
if err = l.subscriber.BlockNotifications(); err != nil {
- errCh <- fmt.Errorf("subscribe for blocks: %w", err)
+ errCh <- fmt.Errorf("could not subscribe for blocks: %w", err)
return
}
}
if l.listenNotary {
if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil {
- errCh <- fmt.Errorf("subscribe for notary requests: %w", err)
+ errCh <- fmt.Errorf("could not subscribe for notary requests: %w", err)
return
}
}
}
-func (l *listener) sendError(ctx context.Context, intErr chan<- error, err error) bool {
- if intErr == nil {
- return false
- }
- // This select required because were are reading from error channel and closing listener
- // in the same routine when shutting down node.
- select {
- case <-ctx.Done():
- l.log.Info(ctx, logs.EventStopEventListenerByContext,
- zap.String("reason", ctx.Err().Error()),
- )
- return false
- case intErr <- err:
- return true
- }
-}
-
func (l *listener) listenLoop(ctx context.Context, intErr chan<- error, subErrCh chan error) {
chs := l.subscriber.NotificationChannels()
@@ -229,82 +219,94 @@ loop:
for {
select {
case err := <-subErrCh:
- if !l.sendError(ctx, intErr, err) {
- l.log.Error(ctx, logs.EventStopEventListenerByError, zap.Error(err))
+ if intErr != nil {
+ intErr <- err
+ } else {
+ l.log.Error(logs.EventStopEventListenerByError, zap.Error(err))
}
+
break loop
case <-ctx.Done():
- l.log.Info(ctx, logs.EventStopEventListenerByContext,
+ l.log.Info(logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
break loop
case notifyEvent, ok := <-chs.NotificationsCh:
if !ok {
- l.log.Warn(ctx, logs.EventStopEventListenerByNotificationChannel)
- l.sendError(ctx, intErr, errNotificationSubscrConnectionTerminated)
+ l.log.Warn(logs.EventStopEventListenerByNotificationChannel)
+ if intErr != nil {
+ intErr <- errors.New("event subscriber connection has been terminated")
+ }
+
break loop
} else if notifyEvent == nil {
- l.log.Warn(ctx, logs.EventNilNotificationEventWasCaught)
+ l.log.Warn(logs.EventNilNotificationEventWasCaught)
continue loop
}
- l.handleNotifyEvent(ctx, notifyEvent)
+ l.handleNotifyEvent(notifyEvent)
case notaryEvent, ok := <-chs.NotaryRequestsCh:
if !ok {
- l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel)
- l.sendError(ctx, intErr, errNotarySubscrConnectionTerminated)
+ l.log.Warn(logs.EventStopEventListenerByNotaryChannel)
+ if intErr != nil {
+ intErr <- errors.New("notary event subscriber connection has been terminated")
+ }
+
break loop
} else if notaryEvent == nil {
- l.log.Warn(ctx, logs.EventNilNotaryEventWasCaught)
+ l.log.Warn(logs.EventNilNotaryEventWasCaught)
continue loop
}
- l.handleNotaryEvent(ctx, notaryEvent)
+ l.handleNotaryEvent(notaryEvent)
case b, ok := <-chs.BlockCh:
if !ok {
- l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel)
- l.sendError(ctx, intErr, errBlockNotificationChannelClosed)
+ l.log.Warn(logs.EventStopEventListenerByBlockChannel)
+ if intErr != nil {
+ intErr <- errors.New("new block notification channel is closed")
+ }
+
break loop
} else if b == nil {
- l.log.Warn(ctx, logs.EventNilBlockWasCaught)
+ l.log.Warn(logs.EventNilBlockWasCaught)
continue loop
}
- l.handleBlockEvent(ctx, b)
+ l.handleBlockEvent(b)
}
}
}
-func (l *listener) handleBlockEvent(ctx context.Context, b *block.Block) {
+func (l *listener) handleBlockEvent(b *block.Block) {
if err := l.pool.Submit(func() {
for i := range l.blockHandlers {
- l.blockHandlers[i](ctx, b)
+ l.blockHandlers[i](b)
}
}); err != nil {
- l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) handleNotaryEvent(ctx context.Context, notaryEvent *result.NotaryRequestEvent) {
+func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) {
if err := l.pool.Submit(func() {
- l.parseAndHandleNotary(ctx, notaryEvent)
+ l.parseAndHandleNotary(notaryEvent)
}); err != nil {
- l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) handleNotifyEvent(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
+func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEvent) {
if err := l.pool.Submit(func() {
- l.parseAndHandleNotification(ctx, notifyEvent)
+ l.parseAndHandleNotification(notifyEvent)
}); err != nil {
- l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained,
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
-func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) {
+func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotificationEvent) {
log := l.log.With(
zap.String("script hash LE", notifyEvent.ScriptHash.StringLE()),
)
@@ -317,14 +319,16 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
)
// get the event parser
- keyEvent := scriptHashWithType{Hash: notifyEvent.ScriptHash, Type: typEvent}
+ keyEvent := scriptHashWithType{}
+ keyEvent.SetScriptHash(notifyEvent.ScriptHash)
+ keyEvent.SetType(typEvent)
l.mtx.RLock()
parser, ok := l.notificationParsers[keyEvent]
l.mtx.RUnlock()
if !ok {
- log.Debug(ctx, logs.EventEventParserNotSet)
+ log.Debug(logs.EventEventParserNotSet)
return
}
@@ -332,8 +336,8 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
// parse the notification event
event, err := parser(notifyEvent)
if err != nil {
- log.Warn(ctx, logs.EventCouldNotParseNotificationEvent,
- zap.Error(err),
+ log.Warn(logs.EventCouldNotParseNotificationEvent,
+ zap.String("error", err.Error()),
)
return
@@ -345,7 +349,7 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
l.mtx.RUnlock()
if len(handlers) == 0 {
- log.Info(ctx, logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
+ log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
@@ -353,11 +357,11 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
}
for _, handler := range handlers {
- handler(ctx, event)
+ handler(event)
}
}
-func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRequestEvent) {
+func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
// prepare the notary event
notaryEvent, err := l.notaryEventsPreparator.Prepare(nr.NotaryRequest)
if err != nil {
@@ -365,14 +369,14 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
switch {
case errors.Is(err, ErrTXAlreadyHandled):
case errors.As(err, &expErr):
- l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent,
- zap.Error(err),
+ l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent,
+ zap.String("error", err.Error()),
zap.Uint32("current_block_height", expErr.CurrentBlockHeight),
zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight),
)
default:
- l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent,
- zap.Error(err),
+ l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent,
+ zap.String("error", err.Error()),
)
}
@@ -395,7 +399,7 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
l.mtx.RUnlock()
if !ok {
- log.Debug(ctx, logs.EventNotaryParserNotSet)
+ log.Debug(logs.EventNotaryParserNotSet)
return
}
@@ -403,8 +407,8 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
// parse the notary event
event, err := parser(notaryEvent)
if err != nil {
- log.Warn(ctx, logs.EventCouldNotParseNotaryEvent,
- zap.Error(err),
+ log.Warn(logs.EventCouldNotParseNotaryEvent,
+ zap.String("error", err.Error()),
)
return
@@ -416,14 +420,47 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
l.mtx.RUnlock()
if !ok {
- log.Info(ctx, logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
+ log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
return
}
- handler(ctx, event)
+ handler(event)
+}
+
+// SetNotificationParser sets the parser of particular contract event.
+//
+// Ignores nil and already set parsers.
+// Ignores the parser if listener is started.
+func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
+ log := l.log.With(
+ zap.String("contract", pi.ScriptHash().StringLE()),
+ zap.Stringer("event_type", pi.getType()),
+ )
+
+ parser := pi.parser()
+ if parser == nil {
+ log.Info(logs.EventIgnoreNilEventParser)
+ return
+ }
+
+ l.mtx.Lock()
+ defer l.mtx.Unlock()
+
+ // check if the listener was started
+ if l.started {
+ log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser)
+ return
+ }
+
+ // add event parser
+ if _, ok := l.notificationParsers[pi.scriptHashWithType]; !ok {
+ l.notificationParsers[pi.scriptHashWithType] = pi.parser()
+ }
+
+ log.Debug(logs.EventRegisteredNewEventParser)
}
// RegisterNotificationHandler registers the handler for particular notification event of contract.
@@ -432,23 +469,35 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
// Ignores handlers of event without parser.
func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
log := l.log.With(
- zap.String("contract", hi.Contract.StringLE()),
- zap.Stringer("event_type", hi.Type),
+ zap.String("contract", hi.ScriptHash().StringLE()),
+ zap.Stringer("event_type", hi.GetType()),
)
+ handler := hi.Handler()
+ if handler == nil {
+ log.Warn(logs.EventIgnoreNilEventHandler)
+ return
+ }
+
// check if parser was set
+ l.mtx.RLock()
+ _, ok := l.notificationParsers[hi.scriptHashWithType]
+ l.mtx.RUnlock()
+
+ if !ok {
+ log.Warn(logs.EventIgnoreHandlerOfEventWoParser)
+ return
+ }
+
+ // add event handler
l.mtx.Lock()
- defer l.mtx.Unlock()
-
- k := scriptHashWithType{Hash: hi.Contract, Type: hi.Type}
-
- l.notificationParsers[k] = hi.Parser
- l.notificationHandlers[k] = append(
- l.notificationHandlers[k],
- hi.Handlers...,
+ l.notificationHandlers[hi.scriptHashWithType] = append(
+ l.notificationHandlers[hi.scriptHashWithType],
+ hi.Handler(),
)
+ l.mtx.Unlock()
- log.Debug(context.Background(), logs.EventRegisteredNewEventHandler)
+ log.Debug(logs.EventRegisteredNewEventHandler)
}
// EnableNotarySupport enables notary request listening. Passed hash is
@@ -487,15 +536,27 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
zap.Stringer("notary_type", pi.RequestType()),
)
+ parser := pi.parser()
+ if parser == nil {
+ log.Info(logs.EventIgnoreNilNotaryEventParser)
+ return
+ }
+
l.mtx.Lock()
defer l.mtx.Unlock()
+ // check if the listener was started
+ if l.started {
+ log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser)
+ return
+ }
+
// add event parser
if _, ok := l.notaryParsers[pi.notaryRequestTypes]; !ok {
l.notaryParsers[pi.notaryRequestTypes] = pi.parser()
}
- log.Info(context.Background(), logs.EventRegisteredNewEventParser)
+ log.Info(logs.EventRegisteredNewEventParser)
}
// RegisterNotaryHandler registers the handler for particular notification notary request event.
@@ -513,13 +574,19 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
zap.Stringer("notary type", hi.RequestType()),
)
+ handler := hi.Handler()
+ if handler == nil {
+ log.Warn(logs.EventIgnoreNilNotaryEventHandler)
+ return
+ }
+
// check if parser was set
l.mtx.RLock()
_, ok := l.notaryParsers[hi.notaryRequestTypes]
l.mtx.RUnlock()
if !ok {
- log.Warn(context.Background(), logs.EventIgnoreHandlerOfNotaryEventWoParser)
+ log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser)
return
}
@@ -528,19 +595,22 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler()
l.mtx.Unlock()
- log.Info(context.Background(), logs.EventRegisteredNewEventHandler)
+ log.Info(logs.EventRegisteredNewEventHandler)
}
// Stop closes subscription channel with remote neo node.
func (l *listener) Stop() {
l.stopOnce.Do(func() {
l.subscriber.Close()
- l.pool.Release()
})
- l.wg.Wait()
}
func (l *listener) RegisterBlockHandler(handler BlockHandler) {
+ if handler == nil {
+ l.log.Warn(logs.EventIgnoreNilBlockHandler)
+ return
+ }
+
l.blockHandlers = append(l.blockHandlers, handler)
}
@@ -557,7 +627,7 @@ func NewListener(p ListenerParams) (Listener, error) {
// The default capacity is 0, which means "infinite".
pool, err := ants.NewPool(p.WorkerPoolCapacity)
if err != nil {
- return nil, fmt.Errorf("init worker pool: %w", err)
+ return nil, fmt.Errorf("could not init worker pool: %w", err)
}
return &listener{
diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go
index 87f37305f..dc7ac3b81 100644
--- a/pkg/morph/event/listener_test.go
+++ b/pkg/morph/event/listener_test.go
@@ -20,7 +20,7 @@ func TestEventHandling(t *testing.T) {
notaryRequestsCh := make(chan *result.NotaryRequestEvent)
l, err := NewListener(ListenerParams{
- Logger: test.NewLogger(t),
+ Logger: test.NewLogger(t, true),
Subscriber: &testSubscriber{
blockCh: blockCh,
notificationCh: notificationCh,
@@ -34,24 +34,34 @@ func TestEventHandling(t *testing.T) {
blockHandled := make(chan bool)
handledBlocks := make([]*block.Block, 0)
- l.RegisterBlockHandler(func(_ context.Context, b *block.Block) {
+ l.RegisterBlockHandler(func(b *block.Block) {
handledBlocks = append(handledBlocks, b)
blockHandled <- true
})
+ key := scriptHashWithType{
+ scriptHashValue: scriptHashValue{
+ hash: util.Uint160{100},
+ },
+ typeValue: typeValue{
+ typ: TypeFromString("notification type"),
+ },
+ }
+
+ l.SetNotificationParser(NotificationParserInfo{
+ scriptHashWithType: key,
+ p: func(cne *state.ContainedNotificationEvent) (Event, error) {
+ return testNotificationEvent{source: cne}, nil
+ },
+ })
+
notificationHandled := make(chan bool)
handledNotifications := make([]Event, 0)
l.RegisterNotificationHandler(NotificationHandlerInfo{
- Contract: util.Uint160{100},
- Type: TypeFromString("notification type"),
- Parser: func(cne *state.ContainedNotificationEvent) (Event, error) {
- return testNotificationEvent{source: cne}, nil
- },
- Handlers: []Handler{
- func(_ context.Context, e Event) {
- handledNotifications = append(handledNotifications, e)
- notificationHandled <- true
- },
+ scriptHashWithType: key,
+ h: func(e Event) {
+ handledNotifications = append(handledNotifications, e)
+ notificationHandled <- true
},
})
@@ -92,7 +102,7 @@ func TestErrorPassing(t *testing.T) {
t.Run("notification error", func(t *testing.T) {
nErr := fmt.Errorf("notification error")
l, err := NewListener(ListenerParams{
- Logger: test.NewLogger(t),
+ Logger: test.NewLogger(t, true),
Subscriber: &testSubscriber{
blockCh: blockCh,
notificationCh: notificationCh,
@@ -116,7 +126,7 @@ func TestErrorPassing(t *testing.T) {
t.Run("block error", func(t *testing.T) {
bErr := fmt.Errorf("notification error")
l, err := NewListener(ListenerParams{
- Logger: test.NewLogger(t),
+ Logger: test.NewLogger(t, true),
Subscriber: &testSubscriber{
blockCh: blockCh,
notificationCh: notificationCh,
@@ -127,7 +137,7 @@ func TestErrorPassing(t *testing.T) {
WorkerPoolCapacity: 10,
})
require.NoError(t, err, "failed to create listener")
- l.RegisterBlockHandler(func(context.Context, *block.Block) {})
+ l.RegisterBlockHandler(func(b *block.Block) {})
errCh := make(chan error)
@@ -137,6 +147,7 @@ func TestErrorPassing(t *testing.T) {
require.ErrorIs(t, err, bErr, "invalid block error")
})
+
}
type testSubscriber struct {
@@ -155,7 +166,6 @@ func (s *testSubscriber) UnsubscribeForNotification() {}
func (s *testSubscriber) BlockNotifications() error {
return s.blockErr
}
-
func (s *testSubscriber) SubscribeForNotaryRequests(mainTXSigner util.Uint160) error {
return nil
}
diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go
index 39c8f6237..e454e2a6a 100644
--- a/pkg/morph/event/netmap/epoch.go
+++ b/pkg/morph/event/netmap/epoch.go
@@ -1,7 +1,9 @@
package netmap
import (
- "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/netmap"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -35,13 +37,22 @@ func (s NewEpoch) TxHash() util.Uint256 {
//
// Result is type of NewEpoch.
func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) {
- var nee netmap.NewEpochEvent
- if err := nee.FromStackItem(e.Item); err != nil {
- return nil, err
+ params, err := event.ParseStackArray(e)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
+ }
+
+ if ln := len(params); ln != 1 {
+ return nil, event.WrongNumberOfParameters(1, ln)
+ }
+
+ prmEpochNum, err := client.IntFromStackItem(params[0])
+ if err != nil {
+ return nil, fmt.Errorf("could not get integer epoch number: %w", err)
}
return NewEpoch{
- Num: nee.Epoch.Uint64(),
+ Num: uint64(prmEpochNum),
Hash: e.Container,
}, nil
}
diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go
index 6ff692327..bc267ecb6 100644
--- a/pkg/morph/event/netmap/epoch_test.go
+++ b/pkg/morph/event/netmap/epoch_test.go
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/stretchr/testify/require"
)
@@ -16,7 +17,7 @@ func TestParseNewEpoch(t *testing.T) {
}
_, err := ParseNewEpoch(createNotifyEventFromItems(prms))
- require.Error(t, err)
+ require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
})
t.Run("wrong first parameter type", func(t *testing.T) {
diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go
index 993182ab4..0260810b8 100644
--- a/pkg/morph/event/netmap/update_peer_notary.go
+++ b/pkg/morph/event/netmap/update_peer_notary.go
@@ -10,7 +10,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)
-var errNilPubKey = errors.New("public key is nil")
+var errNilPubKey = errors.New("could not parse public key: public key is nil")
func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
if v == nil {
@@ -19,7 +19,7 @@ func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256())
if err != nil {
- return fmt.Errorf("parse public key: %w", err)
+ return fmt.Errorf("could not parse public key: %w", err)
}
return
diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go
index b11973646..37091f768 100644
--- a/pkg/morph/event/notary_preparator.go
+++ b/pkg/morph/event/notary_preparator.go
@@ -127,7 +127,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
for {
opCode, param, err = ctx.Next()
if err != nil {
- return nil, fmt.Errorf("get next opcode in script: %w", err)
+ return nil, fmt.Errorf("could not get next opcode in script: %w", err)
}
if opCode == opcode.RET {
@@ -147,7 +147,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
// retrieve contract's script hash
contractHash, err := util.Uint160DecodeBytesBE(ops[opsLen-2].param)
if err != nil {
- return nil, fmt.Errorf("decode contract hash: %w", err)
+ return nil, fmt.Errorf("could not decode contract hash: %w", err)
}
// retrieve contract's method
@@ -164,7 +164,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
if len(args) != 0 {
err = p.validateParameterOpcodes(args)
if err != nil {
- return nil, fmt.Errorf("validate arguments: %w", err)
+ return nil, fmt.Errorf("could not validate arguments: %w", err)
}
// without args packing opcodes
@@ -199,14 +199,14 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error {
// neo-go API)
//
// this check prevents notary flow recursion
- if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 &&
- !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version
+ if !(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 ||
+ bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version
return ErrTXAlreadyHandled
}
currentAlphabet, err := p.alphaKeys()
if err != nil {
- return fmt.Errorf("fetch Alphabet public keys: %w", err)
+ return fmt.Errorf("could not fetch Alphabet public keys: %w", err)
}
err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet)
@@ -239,7 +239,7 @@ func (p Preparator) validateParameterOpcodes(ops []Op) error {
argsLen, err := IntFromOpcode(ops[l-2])
if err != nil {
- return fmt.Errorf("parse argument len: %w", err)
+ return fmt.Errorf("could not parse argument len: %w", err)
}
err = validateNestedArgs(argsLen, ops[:l-2])
@@ -273,7 +273,7 @@ func validateNestedArgs(expArgLen int64, ops []Op) error {
argsLen, err := IntFromOpcode(ops[i-1])
if err != nil {
- return fmt.Errorf("parse argument len: %w", err)
+ return fmt.Errorf("could not parse argument len: %w", err)
}
expArgLen += argsLen + 1
@@ -307,7 +307,7 @@ func (p Preparator) validateExpiration(fbTX *transaction.Transaction) error {
currBlock, err := p.blockCounter.BlockCount()
if err != nil {
- return fmt.Errorf("fetch current chain height: %w", err)
+ return fmt.Errorf("could not fetch current chain height: %w", err)
}
if currBlock >= nvb.Height {
@@ -327,7 +327,7 @@ func (p Preparator) validateCosigners(expected int, s []transaction.Signer, alph
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("get Alphabet verification script: %w", err)
+ return fmt.Errorf("could not get Alphabet verification script: %w", err)
}
if !s[1].Account.Equals(hash.Hash160(alphaVerificationScript)) {
@@ -346,7 +346,7 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys)
if err != nil {
- return fmt.Errorf("get Alphabet verification script: %w", err)
+ return fmt.Errorf("could not get Alphabet verification script: %w", err)
}
// the second one must be witness of the current
@@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
// the last one must be a placeholder for notary contract witness
last := len(w) - 1
- if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981
- !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
+ if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981
+ bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
len(w[last].VerificationScript) != 0 {
return errIncorrectNotaryPlaceholder
}
diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go
index 60ddb4601..8da9d868a 100644
--- a/pkg/morph/event/notary_preparator_test.go
+++ b/pkg/morph/event/notary_preparator_test.go
@@ -25,7 +25,7 @@ var (
alphaKeys keys.PublicKeys
wrongAlphaKeys keys.PublicKeys
- dummyAlphabetInvocationScript []byte
+ dummyAlphabetInvocationScript = []byte{} // expected to be empty if generated by Notary Actor, as requester can't fill it in
dummyAlphabetInvocationScriptOld = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) // expected to be dummy if generated manually
wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...)
@@ -439,7 +439,7 @@ func TestPrepare_CorrectNR(t *testing.T) {
)
for _, test := range tests {
- for i := range 1 { // run tests against 3 and 4 witness NR
+ for i := 0; i < 1; i++ { // run tests against 3 and 4 witness NR
for _, dummyMultisig := range []bool{true, false} { // run tests against empty and dummy multisig/Notary witness
additionalWitness := i == 0
nr := correctNR(script(test.hash, test.method, test.args...), dummyMultisig, additionalWitness)
diff --git a/pkg/morph/event/parsers.go b/pkg/morph/event/parsers.go
index 5adeb4b30..90eff0bd2 100644
--- a/pkg/morph/event/parsers.go
+++ b/pkg/morph/event/parsers.go
@@ -11,6 +11,15 @@ import (
// from the StackItem list.
type NotificationParser func(*state.ContainedNotificationEvent) (Event, error)
+// NotificationParserInfo is a structure that groups
+// the parameters of particular contract
+// notification event parser.
+type NotificationParserInfo struct {
+ scriptHashWithType
+
+ p NotificationParser
+}
+
// NotaryPreparator constructs NotaryEvent
// from the NotaryRequest event.
type NotaryPreparator interface {
@@ -38,6 +47,24 @@ func (n *NotaryParserInfo) SetParser(p NotaryParser) {
n.p = p
}
+// SetParser is an event parser setter.
+func (s *NotificationParserInfo) SetParser(v NotificationParser) {
+ s.p = v
+}
+
+func (s NotificationParserInfo) parser() NotificationParser {
+ return s.p
+}
+
+// SetType is an event type setter.
+func (s *NotificationParserInfo) SetType(v Type) {
+ s.typ = v
+}
+
+func (s NotificationParserInfo) getType() Type {
+ return s.typ
+}
+
type wrongPrmNumber struct {
exp, act int
}
diff --git a/pkg/morph/event/rolemanagement/designate.go b/pkg/morph/event/rolemanagement/designate.go
index b384e436b..28c968046 100644
--- a/pkg/morph/event/rolemanagement/designate.go
+++ b/pkg/morph/event/rolemanagement/designate.go
@@ -26,7 +26,7 @@ func (Designate) MorphEvent() {}
func ParseDesignate(e *state.ContainedNotificationEvent) (event.Event, error) {
params, err := event.ParseStackArray(e)
if err != nil {
- return nil, fmt.Errorf("parse stack items from notify event: %w", err)
+ return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
}
if len(params) != 2 {
diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go
index 0088be400..2a7c6250d 100644
--- a/pkg/morph/event/utils.go
+++ b/pkg/morph/event/utils.go
@@ -1,7 +1,6 @@
package event
import (
- "context"
"errors"
"fmt"
@@ -20,9 +19,13 @@ type scriptHashValue struct {
hash util.Uint160
}
+type typeValue struct {
+ typ Type
+}
+
type scriptHashWithType struct {
- Hash util.Uint160
- Type Type
+ scriptHashValue
+ typeValue
}
type notaryRequestTypes struct {
@@ -69,15 +72,26 @@ func (s scriptHashValue) ScriptHash() util.Uint160 {
return s.hash
}
+// SetType is an event type setter.
+func (s *typeValue) SetType(v Type) {
+ s.typ = v
+}
+
+// GetType is an event type getter.
+func (s typeValue) GetType() Type {
+ return s.typ
+}
+
// WorkerPoolHandler sets closure over worker pool w with passed handler h.
func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler {
- return func(ctx context.Context, e Event) {
+ return func(e Event) {
err := w.Submit(func() {
- h(ctx, e)
+ h(e)
})
+
if err != nil {
- log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool,
- zap.Error(err),
+ log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool,
+ zap.String("error", err.Error()),
)
}
}
diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go
index 4ef59ed6a..c2d8494fa 100644
--- a/pkg/morph/subscriber/subscriber.go
+++ b/pkg/morph/subscriber/subscriber.go
@@ -12,6 +12,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/util"
"go.uber.org/zap"
)
@@ -182,6 +183,8 @@ func New(ctx context.Context, p *Params) (Subscriber, error) {
func (s *subscriber) routeNotifications(ctx context.Context) {
var (
+ // TODO: not needed after nspcc-dev/neo-go#2980.
+ cliCh = s.client.NotificationChannel()
restoreCh = make(chan bool)
restoreInProgress bool
)
@@ -217,6 +220,8 @@ routeloop:
} else {
connLost = true
}
+ case _, ok := <-cliCh:
+ connLost = !ok
case ok := <-restoreCh:
restoreInProgress = false
if !ok {
@@ -225,7 +230,7 @@ routeloop:
}
if connLost {
if !restoreInProgress {
- restoreInProgress = s.switchEndpoint(ctx, restoreCh)
+ restoreInProgress, cliCh = s.switchEndpoint(ctx, restoreCh)
if !restoreInProgress {
break routeloop
}
@@ -244,23 +249,25 @@ routeloop:
close(s.notaryChan)
}
-func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) bool {
- s.log.Info(ctx, logs.RPConnectionLost)
+func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) (bool, <-chan rpcclient.Notification) {
+ s.log.Info(logs.RPConnectionLost)
if !s.client.SwitchRPC(ctx) {
- s.log.Error(ctx, logs.RPCNodeSwitchFailure)
- return false
+ s.log.Error(logs.RPCNodeSwitchFailure)
+ return false, nil
}
+ cliCh := s.client.NotificationChannel()
+
s.Lock()
chs := newSubChannels()
go func() {
- finishCh <- s.restoreSubscriptions(ctx, chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
+ finishCh <- s.restoreSubscriptions(chs.NotifyChan, chs.BlockChan, chs.NotaryChan)
}()
s.current = chs
s.Unlock()
s.client.Metrics().IncSwitchCount()
- return true
+ return true, cliCh
}
func newSubChannels() subChannels {
@@ -295,34 +302,35 @@ drainloop:
// restoreSubscriptions restores subscriptions according to
// cached information about them.
-func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *state.ContainedNotificationEvent,
- blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent,
-) bool {
+func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotificationEvent,
+ blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent) bool {
var err error
// new block events restoration
if s.subscribedToNewBlocks {
_, err = s.client.ReceiveBlocks(blCh)
if err != nil {
- s.log.Error(ctx, logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
// notification events restoration
for contract := range s.subscribedEvents {
+ contract := contract // See https://github.com/nspcc-dev/neo-go/issues/2890
_, err = s.client.ReceiveExecutionNotifications(contract, notifCh)
if err != nil {
- s.log.Error(ctx, logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
// notary notification events restoration
for signer := range s.subscribedNotaryEvents {
+ signer := signer // See https://github.com/nspcc-dev/neo-go/issues/2890
_, err = s.client.ReceiveNotaryRequests(signer, notaryCh)
if err != nil {
- s.log.Error(ctx, logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+ s.log.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
return false
}
}
diff --git a/pkg/morph/timer/block.go b/pkg/morph/timer/block.go
index 974be1120..be20d3571 100644
--- a/pkg/morph/timer/block.go
+++ b/pkg/morph/timer/block.go
@@ -15,19 +15,41 @@ type BlockTickHandler func()
// It can tick the blocks and perform certain actions
// on block time intervals.
type BlockTimer struct {
+ rolledBack bool
+
mtx sync.Mutex
dur BlockMeter
baseDur uint32
+ mul, div uint32
+
cur, tgt uint32
last uint32
h BlockTickHandler
+ ps []BlockTimer
+
once bool
+
+ deltaCfg
+}
+
+// DeltaOption is an option of delta-interval handler.
+type DeltaOption func(*deltaCfg)
+
+type deltaCfg struct {
+ pulse bool
+}
+
+// WithPulse returns option to call delta-interval handler multiple times.
+func WithPulse() DeltaOption {
+ return func(c *deltaCfg) {
+ c.pulse = true
+ }
}
// StaticBlockMeter returns BlockMeters that always returns (d, nil).
@@ -43,19 +65,52 @@ func StaticBlockMeter(d uint32) BlockMeter {
func NewBlockTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
dur: dur,
+ mul: 1,
+ div: 1,
h: h,
+ deltaCfg: deltaCfg{
+ pulse: true,
+ },
}
}
// NewOneTickTimer creates a new BlockTimer that ticks only once.
+//
+// Do not use delta handlers with pulse in this timer.
func NewOneTickTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer {
return &BlockTimer{
dur: dur,
+ mul: 1,
+ div: 1,
h: h,
once: true,
}
}
+// OnDelta registers handler which is executed on (mul / div * BlockMeter()) block
+// after basic interval reset.
+//
+// If WithPulse option is provided, handler is executed (mul / div * BlockMeter()) block
+// during base interval.
+func (t *BlockTimer) OnDelta(mul, div uint32, h BlockTickHandler, opts ...DeltaOption) {
+ c := deltaCfg{
+ pulse: false,
+ }
+
+ for i := range opts {
+ opts[i](&c)
+ }
+
+ t.ps = append(t.ps, BlockTimer{
+ mul: mul,
+ div: div,
+ h: h,
+ once: t.once,
+
+ deltaCfg: c,
+ })
+}
+
// Reset resets previous ticks of the BlockTimer.
//
// Returns BlockMeter's error upon occurrence.
@@ -69,18 +124,29 @@ func (t *BlockTimer) Reset() error {
t.resetWithBaseInterval(d)
+ for i := range t.ps {
+ t.ps[i].resetWithBaseInterval(d)
+ }
+
t.mtx.Unlock()
return nil
}
func (t *BlockTimer) resetWithBaseInterval(d uint32) {
+ t.rolledBack = false
t.baseDur = d
t.reset()
}
func (t *BlockTimer) reset() {
- delta := t.baseDur
+ mul, div := t.mul, t.div
+
+ if !t.pulse && t.rolledBack && mul < div {
+ mul, div = 1, 1
+ }
+
+ delta := mul * t.baseDur / div
if delta == 0 {
delta = 1
}
@@ -114,7 +180,12 @@ func (t *BlockTimer) tick(h uint32) {
if !t.once {
t.cur = 0
+ t.rolledBack = true
t.reset()
}
}
+
+ for i := range t.ps {
+ t.ps[i].tick(h)
+ }
}
diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go
index a144b3db6..93bb04de5 100644
--- a/pkg/morph/timer/block_test.go
+++ b/pkg/morph/timer/block_test.go
@@ -1,7 +1,6 @@
package timer_test
import (
- "errors"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer"
@@ -9,7 +8,7 @@ import (
)
func tickN(t *timer.BlockTimer, n uint32) {
- for range n {
+ for i := uint32(0); i < n; i++ {
t.Tick(0)
}
}
@@ -18,7 +17,7 @@ func tickN(t *timer.BlockTimer, n uint32) {
// "resetting" consists of ticking the current height as well and invoking `Reset`.
func TestIRBlockTimer_Reset(t *testing.T) {
var baseCounter [2]int
- const blockDur = uint32(3)
+ blockDur := uint32(3)
bt1 := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
@@ -49,40 +48,8 @@ func TestIRBlockTimer_Reset(t *testing.T) {
require.Equal(t, baseCounter[0], baseCounter[1])
}
-func TestBlockTimer_ResetChangeDuration(t *testing.T) {
- var dur uint32 = 2
- var err error
- var counter int
-
- bt := timer.NewBlockTimer(
- func() (uint32, error) { return dur, err },
- func() { counter++ })
-
- require.NoError(t, bt.Reset())
-
- tickN(bt, 2)
- require.Equal(t, 1, counter)
-
- t.Run("return error", func(t *testing.T) {
- dur = 5
- err = errors.New("my awesome error")
- require.ErrorIs(t, bt.Reset(), err)
-
- tickN(bt, 2)
- require.Equal(t, 2, counter)
- })
- t.Run("change duration", func(t *testing.T) {
- dur = 5
- err = nil
- require.NoError(t, bt.Reset())
-
- tickN(bt, 5)
- require.Equal(t, 3, counter)
- })
-}
-
func TestBlockTimer(t *testing.T) {
- const blockDur = uint32(10)
+ blockDur := uint32(10)
baseCallCounter := uint32(0)
bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
@@ -96,6 +63,85 @@ func TestBlockTimer(t *testing.T) {
tickN(bt, intervalNum*blockDur)
require.Equal(t, intervalNum, uint32(baseCallCounter))
+
+ // add half-interval handler
+ halfCallCounter := uint32(0)
+
+ bt.OnDelta(1, 2, func() {
+ halfCallCounter++
+ })
+
+ // add double interval handler
+ doubleCallCounter := uint32(0)
+
+ bt.OnDelta(2, 1, func() {
+ doubleCallCounter++
+ })
+
+ require.NoError(t, bt.Reset())
+
+ baseCallCounter = 0
+ intervalNum = 20
+
+ tickN(bt, intervalNum*blockDur)
+
+ require.Equal(t, intervalNum, uint32(halfCallCounter))
+ require.Equal(t, intervalNum, uint32(baseCallCounter))
+ require.Equal(t, intervalNum/2, uint32(doubleCallCounter))
+}
+
+func TestDeltaPulse(t *testing.T) {
+ blockDur := uint32(9)
+ baseCallCounter := uint32(0)
+
+ bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
+ baseCallCounter++
+ })
+
+ deltaCallCounter := uint32(0)
+
+ div := uint32(3)
+
+ bt.OnDelta(1, div, func() {
+ deltaCallCounter++
+ }, timer.WithPulse())
+
+ require.NoError(t, bt.Reset())
+
+ intervalNum := uint32(7)
+
+ tickN(bt, intervalNum*blockDur)
+
+ require.Equal(t, intervalNum, uint32(baseCallCounter))
+ require.Equal(t, intervalNum*div, uint32(deltaCallCounter))
+}
+
+func TestDeltaReset(t *testing.T) {
+ blockDur := uint32(6)
+ baseCallCounter := 0
+
+ bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() {
+ baseCallCounter++
+ })
+
+ detlaCallCounter := 0
+
+ bt.OnDelta(1, 3, func() {
+ detlaCallCounter++
+ })
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 6)
+
+ require.Equal(t, 1, baseCallCounter)
+ require.Equal(t, 1, detlaCallCounter)
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 3)
+
+ require.Equal(t, 2, detlaCallCounter)
}
func TestNewOneTickTimer(t *testing.T) {
@@ -122,51 +168,82 @@ func TestNewOneTickTimer(t *testing.T) {
tickN(bt, 10)
require.Equal(t, 1, baseCallCounter)
})
+
+ t.Run("delta without pulse", func(t *testing.T) {
+ blockDur = uint32(10)
+ baseCallCounter = 0
+
+ bt = timer.NewOneTickTimer(timer.StaticBlockMeter(blockDur), func() {
+ baseCallCounter++
+ })
+
+ detlaCallCounter := 0
+
+ bt.OnDelta(1, 10, func() {
+ detlaCallCounter++
+ })
+
+ require.NoError(t, bt.Reset())
+
+ tickN(bt, 10)
+ require.Equal(t, 1, baseCallCounter)
+ require.Equal(t, 1, detlaCallCounter)
+
+ tickN(bt, 10) // 10 more ticks must not affect counters
+ require.Equal(t, 1, baseCallCounter)
+ require.Equal(t, 1, detlaCallCounter)
+ })
}
func TestBlockTimer_TickSameHeight(t *testing.T) {
- var baseCounter int
+ var baseCounter, deltaCounter int
blockDur := uint32(2)
bt := timer.NewBlockTimer(
func() (uint32, error) { return blockDur, nil },
func() { baseCounter++ })
+ bt.OnDelta(2, 1, func() {
+ deltaCounter++
+ })
require.NoError(t, bt.Reset())
- check := func(t *testing.T, h uint32, base int) {
- for range 2 * int(blockDur) {
+ check := func(t *testing.T, h uint32, base, delta int) {
+ for i := 0; i < 2*int(blockDur); i++ {
bt.Tick(h)
require.Equal(t, base, baseCounter)
+ require.Equal(t, delta, deltaCounter)
}
}
- check(t, 1, 0)
- check(t, 2, 1)
- check(t, 3, 1)
- check(t, 4, 2)
+ check(t, 1, 0, 0)
+ check(t, 2, 1, 0)
+ check(t, 3, 1, 0)
+ check(t, 4, 2, 1)
t.Run("works the same way after `Reset()`", func(t *testing.T) {
t.Run("same block duration", func(t *testing.T) {
require.NoError(t, bt.Reset())
baseCounter = 0
+ deltaCounter = 0
- check(t, 1, 0)
- check(t, 2, 1)
- check(t, 3, 1)
- check(t, 4, 2)
+ check(t, 1, 0, 0)
+ check(t, 2, 1, 0)
+ check(t, 3, 1, 0)
+ check(t, 4, 2, 1)
})
t.Run("different block duration", func(t *testing.T) {
blockDur = 3
require.NoError(t, bt.Reset())
baseCounter = 0
+ deltaCounter = 0
- check(t, 1, 0)
- check(t, 2, 0)
- check(t, 3, 1)
- check(t, 4, 1)
- check(t, 5, 1)
- check(t, 6, 2)
+ check(t, 1, 0, 0)
+ check(t, 2, 0, 0)
+ check(t, 3, 1, 0)
+ check(t, 4, 1, 0)
+ check(t, 5, 1, 0)
+ check(t, 6, 2, 1)
})
})
}
diff --git a/pkg/network/address.go b/pkg/network/address.go
index 4643eef15..8ad285725 100644
--- a/pkg/network/address.go
+++ b/pkg/network/address.go
@@ -1,13 +1,12 @@
package network
import (
- "errors"
+ "fmt"
"net"
"net/url"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
@@ -19,8 +18,6 @@ import (
URIAddr strings: "127.0.0.1:8080"
*/
-var errHostIsEmpty = errors.New("host is empty")
-
// Address represents the FrostFS node
// network address.
type Address struct {
@@ -44,9 +41,11 @@ func (a Address) equal(addr Address) bool {
// See also FromString.
func (a Address) URIAddr() string {
_, host, err := manet.DialArgs(a.ma)
- // the only correct way to construct Address is AddressFromString
- // which makes this error appear unexpected
- assert.NoError(err, "could not get host addr")
+ if err != nil {
+ // the only correct way to construct Address is AddressFromString
+ // which makes this error appear unexpected
+ panic(fmt.Errorf("could not get host addr: %w", err))
+ }
if !a.IsTLSEnabled() {
return host
@@ -89,10 +88,6 @@ func (a *Address) FromString(s string) error {
// multiaddrStringFromHostAddr converts "localhost:8080" to "/dns4/localhost/tcp/8080".
func multiaddrStringFromHostAddr(host string) (string, error) {
- if len(host) == 0 {
- return "", errHostIsEmpty
- }
-
endpoint, port, err := net.SplitHostPort(host)
if err != nil {
return "", err
diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go
index 63ae0bfdb..371d3c76f 100644
--- a/pkg/network/cache/client.go
+++ b/pkg/network/cache/client.go
@@ -5,7 +5,6 @@ import (
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)
@@ -26,7 +25,6 @@ type (
Key *ecdsa.PrivateKey
ResponseCallback func(client.ResponseMetaInfo) error
AllowExternal bool
- DialerSource *net.DialerSource
}
)
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index 54c1e18fb..d03edd962 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -7,13 +7,11 @@ import (
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"google.golang.org/grpc"
@@ -53,48 +51,41 @@ func newMultiClient(addr network.AddressGroup, opts ClientCacheOpts) *multiClien
}
func (x *multiClient) createForAddress(ctx context.Context, addr network.Address) (clientcore.Client, error) {
- var c client.Client
+ var (
+ c client.Client
+ prmInit client.PrmInit
+ prmDial client.PrmDial
+ )
+
+ prmDial.SetServerURI(addr.URIAddr())
- prmInit := client.PrmInit{
- DisableFrostFSErrorResolution: true,
- }
if x.opts.Key != nil {
- prmInit.Key = *x.opts.Key
+ prmInit.SetDefaultPrivateKey(*x.opts.Key)
}
- grpcOpts := []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(
- qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
- metrics.NewUnaryClientInterceptor(),
- tracing.NewUnaryClientInterceptor(),
- tagging.NewUnaryClientInterceptor(),
- ),
- grpc.WithChainStreamInterceptor(
- qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
- metrics.NewStreamClientInterceptor(),
- tracing.NewStreamClientInterceptor(),
- tagging.NewStreamClientInterceptor(),
- ),
- grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()),
- grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
- }
-
- prmDial := client.PrmDial{
- Endpoint: addr.URIAddr(),
- GRPCDialOptions: grpcOpts,
- }
if x.opts.DialTimeout > 0 {
- prmDial.DialTimeout = x.opts.DialTimeout
+ prmDial.SetTimeout(x.opts.DialTimeout)
}
if x.opts.StreamTimeout > 0 {
- prmDial.StreamTimeout = x.opts.StreamTimeout
+ prmDial.SetStreamTimeout(x.opts.StreamTimeout)
}
if x.opts.ResponseCallback != nil {
- prmInit.ResponseInfoCallback = x.opts.ResponseCallback
+ prmInit.SetResponseInfoCallback(x.opts.ResponseCallback)
}
+ prmDial.SetGRPCDialOptions(
+ grpc.WithChainUnaryInterceptor(
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ metrics.NewStreamClientInterceptor(),
+ tracing.NewStreamClientInterceptor(),
+ ),
+ )
+
c.Init(prmInit)
err := c.Dial(ctx, prmDial)
if err != nil {
@@ -161,7 +152,7 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
group.IterateAddresses(func(addr network.Address) bool {
select {
case <-ctx.Done():
- firstErr = fmt.Errorf("try %v: %w", addr, context.Canceled)
+ firstErr = context.Canceled
return true
default:
}
@@ -177,18 +168,16 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
// from the SDK client; should not be considered
// as a connection error
var siErr *objectSDK.SplitInfoError
- var eiErr *objectSDK.ECInfoError
- if err != nil {
- err = fmt.Errorf("client connection error at %v: %w", addr, err)
- x.ReportError(err)
- }
-
- success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr) || errors.As(err, &eiErr)
+ success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr)
if success || firstErr == nil || errors.Is(firstErr, errRecentlyFailed) {
firstErr = err
}
+ if err != nil {
+ x.ReportError(err)
+ }
+
return success
})
@@ -208,8 +197,7 @@ func (x *multiClient) ReportError(err error) {
// from the SDK client; should not be considered
// as a connection error
var siErr *objectSDK.SplitInfoError
- var eiErr *objectSDK.ECInfoError
- if errors.As(err, &siErr) || errors.As(err, &eiErr) {
+ if errors.As(err, &siErr) {
return
}
@@ -250,6 +238,15 @@ func (x *multiClient) ObjectPutSingle(ctx context.Context, p client.PrmObjectPut
return
}
+func (x *multiClient) ContainerAnnounceUsedSpace(ctx context.Context, prm client.PrmAnnounceSpace) (res *client.ResAnnounceSpace, err error) {
+ err = x.iterateClients(ctx, func(c clientcore.Client) error {
+ res, err = c.ContainerAnnounceUsedSpace(ctx, prm)
+ return err
+ })
+
+ return
+}
+
func (x *multiClient) ObjectDelete(ctx context.Context, p client.PrmObjectDelete) (res *client.ResObjectDelete, err error) {
err = x.iterateClients(ctx, func(c clientcore.Client) error {
res, err = c.ObjectDelete(ctx, p)
diff --git a/pkg/network/group.go b/pkg/network/group.go
index 0044fb2d4..a6de0653e 100644
--- a/pkg/network/group.go
+++ b/pkg/network/group.go
@@ -3,8 +3,6 @@ package network
import (
"errors"
"fmt"
- "iter"
- "slices"
"sort"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -69,10 +67,11 @@ func (x AddressGroup) Swap(i, j int) {
// MultiAddressIterator is an interface of network address group.
type MultiAddressIterator interface {
- // Addresses must return an iterator over network addresses.
- Addresses() iter.Seq[string]
+ // Must iterate over network addresses and pass each one
+ // to the handler until it returns true.
+ IterateAddresses(func(string) bool)
- // NumberOfAddresses must return number of addresses in group.
+ // Must return number of addresses in group.
NumberOfAddresses() int
}
@@ -131,19 +130,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error {
// iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f
// until 1st parsing failure or f's error.
func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) {
- for s := range iter.Addresses() {
+ iter.IterateAddresses(func(s string) bool {
var a Address
err = a.FromString(s)
if err != nil {
- return fmt.Errorf("could not parse address from string: %w", err)
+ err = fmt.Errorf("could not parse address from string: %w", err)
+ return true
}
err = f(a)
- if err != nil {
- return err
- }
- }
+
+ return err != nil
+ })
return
}
@@ -165,8 +164,10 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) {
// at least one common address.
func (x AddressGroup) Intersects(x2 AddressGroup) bool {
for i := range x {
- if slices.ContainsFunc(x2, x[i].equal) {
- return true
+ for j := range x2 {
+ if x[i].equal(x2[j]) {
+ return true
+ }
}
}
diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go
index d08264533..5b335fa52 100644
--- a/pkg/network/group_test.go
+++ b/pkg/network/group_test.go
@@ -1,8 +1,6 @@
package network
import (
- "iter"
- "slices"
"sort"
"testing"
@@ -60,8 +58,10 @@ func TestAddressGroup_FromIterator(t *testing.T) {
type testIterator []string
-func (t testIterator) Addresses() iter.Seq[string] {
- return slices.Values(t)
+func (t testIterator) IterateAddresses(f func(string) bool) {
+ for i := range t {
+ f(t[i])
+ }
}
func (t testIterator) NumberOfAddresses() int {
diff --git a/pkg/network/tls.go b/pkg/network/tls.go
index 544dc8240..9aac89c47 100644
--- a/pkg/network/tls.go
+++ b/pkg/network/tls.go
@@ -13,6 +13,11 @@ var tls, _ = multiaddr.NewMultiaddr("/" + tlsProtocolName)
// IsTLSEnabled searches for wrapped TLS protocol in multiaddr.
func (a Address) IsTLSEnabled() bool {
- _, err := a.ma.ValueForProtocol(multiaddr.P_TLS)
- return err == nil
+ for _, protoc := range a.ma.Protocols() {
+ if protoc.Code == multiaddr.P_TLS {
+ return true
+ }
+ }
+
+ return false
}
diff --git a/pkg/network/tls_test.go b/pkg/network/tls_test.go
index 14729f4c2..d93ea6a12 100644
--- a/pkg/network/tls_test.go
+++ b/pkg/network/tls_test.go
@@ -27,18 +27,3 @@ func TestAddress_TLSEnabled(t *testing.T) {
require.Equal(t, test.wantTLS, addr.IsTLSEnabled(), test.input)
}
}
-
-func BenchmarkAddressTLSEnabled(b *testing.B) {
- var addr Address
- err := addr.FromString("/dns4/localhost/tcp/8080/tls")
- require.NoError(b, err)
-
- b.ResetTimer()
- b.ReportAllocs()
-
- var enabled bool
- for range b.N {
- enabled = addr.IsTLSEnabled()
- }
- require.True(b, enabled)
-}
diff --git a/pkg/network/transport/accounting/grpc/service.go b/pkg/network/transport/accounting/grpc/service.go
index 78129bfbe..2144a3001 100644
--- a/pkg/network/transport/accounting/grpc/service.go
+++ b/pkg/network/transport/accounting/grpc/service.go
@@ -3,9 +3,9 @@ package accounting
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
+ accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc"
accountingsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
- accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc"
)
// Server wraps FrostFS API Accounting service and
diff --git a/pkg/network/transport/apemanager/grpc/service.go b/pkg/network/transport/apemanager/grpc/service.go
deleted file mode 100644
index 850d38a65..000000000
--- a/pkg/network/transport/apemanager/grpc/service.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package apemanager
-
-import (
- "context"
-
- apemanager_svc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager"
- apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
- apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
-)
-
-type Server struct {
- srv apemanager_svc.Server
-}
-
-var _ apemanager_grpc.APEManagerServiceServer = (*Server)(nil)
-
-func New(c apemanager_svc.Server) *Server {
- return &Server{
- srv: c,
- }
-}
-
-func (s *Server) AddChain(ctx context.Context, req *apemanager_grpc.AddChainRequest) (*apemanager_grpc.AddChainResponse, error) {
- v2req := new(apemanager_v2.AddChainRequest)
- if err := v2req.FromGRPCMessage(req); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.AddChain(ctx, v2req)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*apemanager_grpc.AddChainResponse), nil
-}
-
-func (s *Server) RemoveChain(ctx context.Context, req *apemanager_grpc.RemoveChainRequest) (*apemanager_grpc.RemoveChainResponse, error) {
- v2req := new(apemanager_v2.RemoveChainRequest)
- if err := v2req.FromGRPCMessage(req); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.RemoveChain(ctx, v2req)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*apemanager_grpc.RemoveChainResponse), nil
-}
-
-func (s *Server) ListChains(ctx context.Context, req *apemanager_grpc.ListChainsRequest) (*apemanager_grpc.ListChainsResponse, error) {
- v2req := new(apemanager_v2.ListChainsRequest)
- if err := v2req.FromGRPCMessage(req); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.ListChains(ctx, v2req)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*apemanager_grpc.ListChainsResponse), nil
-}
diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go
index 8cbf8d9c3..ed514d6d4 100644
--- a/pkg/network/transport/container/grpc/service.go
+++ b/pkg/network/transport/container/grpc/service.go
@@ -3,9 +3,9 @@ package container
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
containersvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
)
// Server wraps FrostFS API Container service and
@@ -81,25 +81,47 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con
return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil
}
-type containerStreamerV2 struct {
- containerGRPC.ContainerService_ListStreamServer
-}
-
-func (s *containerStreamerV2) Send(resp *container.ListStreamResponse) error {
- return s.ContainerService_ListStreamServer.Send(
- resp.ToGRPCMessage().(*containerGRPC.ListStreamResponse),
- )
-}
-
-// ListStream converts gRPC ListRequest message and server-side stream and overtakes its data
-// to gRPC stream.
-func (s *Server) ListStream(req *containerGRPC.ListStreamRequest, gStream containerGRPC.ContainerService_ListStreamServer) error {
- listReq := new(container.ListStreamRequest)
- if err := listReq.FromGRPCMessage(req); err != nil {
- return err
+// SetExtendedACL converts gRPC SetExtendedACLRequest message and passes it to internal Container service.
+func (s *Server) SetExtendedACL(ctx context.Context, req *containerGRPC.SetExtendedACLRequest) (*containerGRPC.SetExtendedACLResponse, error) {
+ setEACLReq := new(container.SetExtendedACLRequest)
+ if err := setEACLReq.FromGRPCMessage(req); err != nil {
+ return nil, err
}
- return s.srv.ListStream(listReq, &containerStreamerV2{
- ContainerService_ListStreamServer: gStream,
- })
+ resp, err := s.srv.SetExtendedACL(ctx, setEACLReq)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.ToGRPCMessage().(*containerGRPC.SetExtendedACLResponse), nil
+}
+
+// GetExtendedACL converts gRPC GetExtendedACLRequest message and passes it to internal Container service.
+func (s *Server) GetExtendedACL(ctx context.Context, req *containerGRPC.GetExtendedACLRequest) (*containerGRPC.GetExtendedACLResponse, error) {
+ getEACLReq := new(container.GetExtendedACLRequest)
+ if err := getEACLReq.FromGRPCMessage(req); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.srv.GetExtendedACL(ctx, getEACLReq)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.ToGRPCMessage().(*containerGRPC.GetExtendedACLResponse), nil
+}
+
+// AnnounceUsedSpace converts gRPC AnnounceUsedSpaceRequest message and passes it to internal Container service.
+func (s *Server) AnnounceUsedSpace(ctx context.Context, req *containerGRPC.AnnounceUsedSpaceRequest) (*containerGRPC.AnnounceUsedSpaceResponse, error) {
+ announceReq := new(container.AnnounceUsedSpaceRequest)
+ if err := announceReq.FromGRPCMessage(req); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.srv.AnnounceUsedSpace(ctx, announceReq)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.ToGRPCMessage().(*containerGRPC.AnnounceUsedSpaceResponse), nil
}
diff --git a/pkg/network/transport/netmap/grpc/service.go b/pkg/network/transport/netmap/grpc/service.go
index 4bc3a42f8..7a3aec86c 100644
--- a/pkg/network/transport/netmap/grpc/service.go
+++ b/pkg/network/transport/netmap/grpc/service.go
@@ -3,9 +3,9 @@ package grpc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
+ netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
netmapsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
- netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc"
)
// Server wraps FrostFS API Netmap service and
@@ -24,8 +24,7 @@ func New(c netmapsvc.Server) *Server {
// LocalNodeInfo converts gRPC request message and passes it to internal netmap service.
func (s *Server) LocalNodeInfo(
ctx context.Context,
- req *netmapGRPC.LocalNodeInfoRequest,
-) (*netmapGRPC.LocalNodeInfoResponse, error) {
+ req *netmapGRPC.LocalNodeInfoRequest) (*netmapGRPC.LocalNodeInfoResponse, error) {
nodeInfoReq := new(netmap.LocalNodeInfoRequest)
if err := nodeInfoReq.FromGRPCMessage(req); err != nil {
return nil, err
diff --git a/pkg/network/transport/object/grpc/get.go b/pkg/network/transport/object/grpc/get.go
index 655b1f9fb..e1655c183 100644
--- a/pkg/network/transport/object/grpc/get.go
+++ b/pkg/network/transport/object/grpc/get.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
)
type getStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/range.go b/pkg/network/transport/object/grpc/range.go
index 7d7ce0e4c..391536e8e 100644
--- a/pkg/network/transport/object/grpc/range.go
+++ b/pkg/network/transport/object/grpc/range.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
)
type getRangeStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/search.go b/pkg/network/transport/object/grpc/search.go
index 8432707f7..a151ced09 100644
--- a/pkg/network/transport/object/grpc/search.go
+++ b/pkg/network/transport/object/grpc/search.go
@@ -1,8 +1,8 @@
package object
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
)
type searchStreamerV2 struct {
diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go
index 15dacd553..7c6b395d5 100644
--- a/pkg/network/transport/object/grpc/service.go
+++ b/pkg/network/transport/object/grpc/service.go
@@ -5,10 +5,10 @@ import (
"errors"
"io"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
)
// Server wraps FrostFS API Object service and
@@ -24,51 +24,9 @@ func New(c objectSvc.ServiceServer) *Server {
}
}
-// Patch opens internal Object patch stream and feeds it by the data read from gRPC stream.
-func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
- stream, err := s.srv.Patch(gStream.Context())
- if err != nil {
- return err
- }
-
- for {
- req, err := gStream.Recv()
- if err != nil {
- if errors.Is(err, io.EOF) {
- resp, err := stream.CloseAndRecv(gStream.Context())
- if err != nil {
- return err
- }
-
- return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PatchResponse))
- }
-
- return err
- }
-
- patchReq := new(object.PatchRequest)
- if err := patchReq.FromGRPCMessage(req); err != nil {
- return err
- }
-
- if err := stream.Send(gStream.Context(), patchReq); err != nil {
- if errors.Is(err, util.ErrAbortStream) {
- resp, err := stream.CloseAndRecv(gStream.Context())
- if err != nil {
- return err
- }
-
- return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PatchResponse))
- }
-
- return err
- }
- }
-}
-
// Put opens internal Object service Put stream and overtakes data from gRPC stream to it.
func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
- stream, err := s.srv.Put(gStream.Context())
+ stream, err := s.srv.Put()
if err != nil {
return err
}
diff --git a/pkg/network/transport/session/grpc/service.go b/pkg/network/transport/session/grpc/service.go
index 6fce397f3..e0dc74942 100644
--- a/pkg/network/transport/session/grpc/service.go
+++ b/pkg/network/transport/session/grpc/service.go
@@ -3,9 +3,9 @@ package session
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc"
sessionsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc"
)
// Server wraps FrostFS API Session service and
diff --git a/pkg/network/validation.go b/pkg/network/validation.go
index b5157f28f..92f650119 100644
--- a/pkg/network/validation.go
+++ b/pkg/network/validation.go
@@ -2,7 +2,6 @@ package network
import (
"errors"
- "iter"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -35,8 +34,8 @@ var (
// MultiAddressIterator.
type NodeEndpointsIterator netmap.NodeInfo
-func (x NodeEndpointsIterator) Addresses() iter.Seq[string] {
- return (netmap.NodeInfo)(x).NetworkEndpoints()
+func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) {
+ (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
}
func (x NodeEndpointsIterator) NumberOfAddresses() int {
diff --git a/pkg/services/accounting/executor.go b/pkg/services/accounting/executor.go
index 93e44c52b..b0722cf8a 100644
--- a/pkg/services/accounting/executor.go
+++ b/pkg/services/accounting/executor.go
@@ -4,8 +4,8 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
)
type ServiceExecutor interface {
diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go
index 6c2df8428..ac836b71d 100644
--- a/pkg/services/accounting/morph/executor.go
+++ b/pkg/services/accounting/morph/executor.go
@@ -5,9 +5,9 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
accountingSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
@@ -21,7 +21,7 @@ func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) {
+func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
return nil, errors.New("missing account")
@@ -34,12 +34,12 @@ func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceReq
return nil, fmt.Errorf("invalid account: %w", err)
}
- amount, err := s.client.BalanceOf(ctx, id)
+ amount, err := s.client.BalanceOf(id)
if err != nil {
return nil, err
}
- balancePrecision, err := s.client.Decimals(ctx)
+ balancePrecision, err := s.client.Decimals()
if err != nil {
return nil, err
}
diff --git a/pkg/services/accounting/server.go b/pkg/services/accounting/server.go
index a280416fb..72833c46c 100644
--- a/pkg/services/accounting/server.go
+++ b/pkg/services/accounting/server.go
@@ -3,7 +3,7 @@ package accounting
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
)
// Server is an interface of the FrostFS API Accounting service server.
diff --git a/pkg/services/accounting/sign.go b/pkg/services/accounting/sign.go
index d8feb76bd..cd6ff0307 100644
--- a/pkg/services/accounting/sign.go
+++ b/pkg/services/accounting/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting"
)
type signService struct {
diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go
deleted file mode 100644
index 61fb025b8..000000000
--- a/pkg/services/apemanager/audit.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package apemanager
-
-import (
- "context"
- "sync/atomic"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
- ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc"
-)
-
-var _ Server = (*auditService)(nil)
-
-type auditService struct {
- next Server
- log *logger.Logger
- enabled *atomic.Bool
-}
-
-func NewAuditService(next Server, log *logger.Logger, enabled *atomic.Bool) Server {
- return &auditService{
- next: next,
- log: log,
- enabled: enabled,
- }
-}
-
-// AddChain implements Server.
-func (a *auditService) AddChain(ctx context.Context, req *apemanager.AddChainRequest) (*apemanager.AddChainResponse, error) {
- res, err := a.next.AddChain(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
- audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
- req.GetBody().GetTarget().GetName(),
- res.GetBody().GetChainID()),
- err == nil)
-
- return res, err
-}
-
-// ListChains implements Server.
-func (a *auditService) ListChains(ctx context.Context, req *apemanager.ListChainsRequest) (*apemanager.ListChainsResponse, error) {
- res, err := a.next.ListChains(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
- audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
- req.GetBody().GetTarget().GetName(),
- nil),
- err == nil)
-
- return res, err
-}
-
-// RemoveChain implements Server.
-func (a *auditService) RemoveChain(ctx context.Context, req *apemanager.RemoveChainRequest) (*apemanager.RemoveChainResponse, error) {
- res, err := a.next.RemoveChain(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
- audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
- req.GetBody().GetTarget().GetName(),
- req.GetBody().GetChainID()),
- err == nil)
-
- return res, err
-}
diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go
deleted file mode 100644
index 1d485321c..000000000
--- a/pkg/services/apemanager/errors/errors.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package errors
-
-import (
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-func ErrAPEManagerAccessDenied(reason string) error {
- err := new(apistatus.APEManagerAccessDenied)
- err.WriteReason(reason)
- return err
-}
-
-func ErrAPEManagerInvalidArgument(msg string) error {
- err := new(apistatus.InvalidArgument)
- err.SetMessage(msg)
- return err
-}
diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go
deleted file mode 100644
index fc08fe569..000000000
--- a/pkg/services/apemanager/executor.go
+++ /dev/null
@@ -1,261 +0,0 @@
-package apemanager
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "errors"
- "fmt"
-
- ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage"
- containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- apemanager_errors "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager/errors"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apeV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/ape"
- apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "github.com/mr-tron/base58/base58"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "go.uber.org/zap"
-)
-
-var errEmptyBodySignature = errors.New("malformed request: empty body signature")
-
-type cfg struct {
- log *logger.Logger
-}
-
-type Service struct {
- cfg
-
- waiter Waiter
-
- cnrSrc containercore.Source
-
- contractStorage ape_contract.ProxyAdaptedContractStorage
-}
-
-type Option func(*cfg)
-
-type Waiter interface {
- WaitTxHalt(context.Context, uint32, util.Uint256) error
-}
-
-func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service {
- s := &Service{
- cnrSrc: cnrSrc,
-
- contractStorage: contractStorage,
-
- waiter: waiter,
- }
-
- for i := range opts {
- opts[i](&s.cfg)
- }
-
- if s.log == nil {
- s.log = logger.NewLoggerWrapper(zap.NewNop())
- }
-
- return s
-}
-
-func WithLogger(log *logger.Logger) Option {
- return func(c *cfg) {
- c.log = log
- }
-}
-
-var _ Server = (*Service)(nil)
-
-// validateContainerTargetRequest validates request for the container target.
-// It checks if request actor is the owner of the container, otherwise it denies the request.
-func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error {
- var cidSDK cidSDK.ID
- if err := cidSDK.DecodeString(cid); err != nil {
- return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err))
- }
- isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey)
- if err != nil {
- return fmt.Errorf("failed to check owner: %w", err)
- }
- if !isOwner {
- return apemanager_errors.ErrAPEManagerAccessDenied("actor must be container owner")
- }
- return nil
-}
-
-func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) {
- pub, err := getSignaturePublicKey(req.GetVerificationHeader())
- if err != nil {
- return nil, err
- }
-
- chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw())
- if err != nil {
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error())
- }
- if len(chain.ID) == 0 {
- const randomIDLength = 10
- randID, err := base58Str(randomIDLength)
- if err != nil {
- return nil, fmt.Errorf("randomize chain ID error: %w", err)
- }
- chain.ID = apechain.ID(randID)
- }
-
- var target policy_engine.Target
-
- switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
- case apeV2.TargetTypeContainer:
- reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
- return nil, err
- }
- target = policy_engine.ContainerTarget(reqCID)
- default:
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
- }
-
- txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain)
- if err != nil {
- return nil, err
- }
- if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
- return nil, err
- }
-
- body := new(apemanagerV2.AddChainResponseBody)
- body.SetChainID(chain.ID)
-
- resp := new(apemanagerV2.AddChainResponse)
- resp.SetBody(body)
-
- return resp, nil
-}
-
-func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) {
- pub, err := getSignaturePublicKey(req.GetVerificationHeader())
- if err != nil {
- return nil, err
- }
-
- var target policy_engine.Target
-
- switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
- case apeV2.TargetTypeContainer:
- reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
- return nil, err
- }
- target = policy_engine.ContainerTarget(reqCID)
- default:
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
- }
-
- txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID())
- if err != nil {
- return nil, err
- }
- if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil {
- return nil, err
- }
-
- body := new(apemanagerV2.RemoveChainResponseBody)
-
- resp := new(apemanagerV2.RemoveChainResponse)
- resp.SetBody(body)
-
- return resp, nil
-}
-
-func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) {
- pub, err := getSignaturePublicKey(req.GetVerificationHeader())
- if err != nil {
- return nil, err
- }
-
- var target policy_engine.Target
-
- switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType {
- case apeV2.TargetTypeContainer:
- reqCID := req.GetBody().GetTarget().GetName()
- if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil {
- return nil, err
- }
- target = policy_engine.ContainerTarget(reqCID)
- default:
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType))
- }
-
- chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target)
- if err != nil {
- return nil, err
- }
-
- res := make([]*apeV2.Chain, 0, len(chs))
- for _, ch := range chs {
- v2chraw := new(apeV2.ChainRaw)
- v2chraw.SetRaw(ch.Bytes())
-
- v2ch := new(apeV2.Chain)
- v2ch.SetKind(v2chraw)
-
- res = append(res, v2ch)
- }
-
- body := new(apemanagerV2.ListChainsResponseBody)
- body.SetChains(res)
-
- resp := new(apemanagerV2.ListChainsResponse)
- resp.SetBody(body)
-
- return resp, nil
-}
-
-func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicKey, error) {
- for vh.GetOrigin() != nil {
- vh = vh.GetOrigin()
- }
- sig := vh.GetBodySignature()
- if sig == nil {
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error())
- }
- key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
- if err != nil {
- return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err))
- }
-
- return key, nil
-}
-
-func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) {
- var actor user.ID
- user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
- actorOwnerID := new(refs.OwnerID)
- actor.WriteToV2(actorOwnerID)
-
- cnr, err := s.cnrSrc.Get(ctx, cid)
- if err != nil {
- return false, fmt.Errorf("get container error: %w", err)
- }
- return cnr.Value.Owner().Equals(actor), nil
-}
-
-// base58Str generates base58 string.
-func base58Str(n int) (string, error) {
- b := make([]byte, n)
- _, err := rand.Read(b)
- if err != nil {
- return "", err
- }
- return base58.FastBase58Encoding(b), nil
-}
diff --git a/pkg/services/apemanager/server.go b/pkg/services/apemanager/server.go
deleted file mode 100644
index e624177ac..000000000
--- a/pkg/services/apemanager/server.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package apemanager
-
-import (
- "context"
-
- apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
-)
-
-type Server interface {
- AddChain(context.Context, *apemanager_v2.AddChainRequest) (*apemanager_v2.AddChainResponse, error)
- RemoveChain(context.Context, *apemanager_v2.RemoveChainRequest) (*apemanager_v2.RemoveChainResponse, error)
- ListChains(context.Context, *apemanager_v2.ListChainsRequest) (*apemanager_v2.ListChainsResponse, error)
-}
diff --git a/pkg/services/apemanager/sign.go b/pkg/services/apemanager/sign.go
deleted file mode 100644
index a172624ff..000000000
--- a/pkg/services/apemanager/sign.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package apemanager
-
-import (
- "context"
- "crypto/ecdsa"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager"
-)
-
-type signService struct {
- sigSvc *util.SignService
-
- next Server
-}
-
-func NewSignService(key *ecdsa.PrivateKey, next Server) Server {
- return &signService{
- sigSvc: util.NewUnarySignService(key),
- next: next,
- }
-}
-
-func (s *signService) AddChain(ctx context.Context, req *apemanager_v2.AddChainRequest) (*apemanager_v2.AddChainResponse, error) {
- if err := s.sigSvc.VerifyRequest(req); err != nil {
- resp := new(apemanager_v2.AddChainResponse)
- return resp, s.sigSvc.SignResponse(resp, err)
- }
- resp, err := util.EnsureNonNilResponse(s.next.AddChain(ctx, req))
- return resp, s.sigSvc.SignResponse(resp, err)
-}
-
-func (s *signService) RemoveChain(ctx context.Context, req *apemanager_v2.RemoveChainRequest) (*apemanager_v2.RemoveChainResponse, error) {
- if err := s.sigSvc.VerifyRequest(req); err != nil {
- resp := new(apemanager_v2.RemoveChainResponse)
- return resp, s.sigSvc.SignResponse(resp, err)
- }
- resp, err := util.EnsureNonNilResponse(s.next.RemoveChain(ctx, req))
- return resp, s.sigSvc.SignResponse(resp, err)
-}
-
-func (s *signService) ListChains(ctx context.Context, req *apemanager_v2.ListChainsRequest) (*apemanager_v2.ListChainsResponse, error) {
- if err := s.sigSvc.VerifyRequest(req); err != nil {
- resp := new(apemanager_v2.ListChainsResponse)
- return resp, s.sigSvc.SignResponse(resp, err)
- }
- resp, err := util.EnsureNonNilResponse(s.next.ListChains(ctx, req))
- return resp, s.sigSvc.SignResponse(resp, err)
-}
diff --git a/pkg/services/apemanager/validation.go b/pkg/services/apemanager/validation.go
deleted file mode 100644
index b26fcf8ee..000000000
--- a/pkg/services/apemanager/validation.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package apemanager
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ape"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
-)
-
-func decodeAndValidateChain(encodedChain []byte) (chain apechain.Chain, err error) {
- if err = chain.DecodeBytes(encodedChain); err != nil {
- return
- }
- for _, rule := range chain.Rules {
- for _, name := range rule.Resources.Names {
- if err = ape.ValidateResourceName(name); err != nil {
- err = fmt.Errorf("invalid resource: %w", err)
- return
- }
- }
- }
- return
-}
diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go
deleted file mode 100644
index fcd3efa44..000000000
--- a/pkg/services/common/ape/checker.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package ape
-
-import (
- "context"
- "crypto/ecdsa"
- "errors"
- "fmt"
-
- aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-var (
- errBearerExpired = errors.New("bearer token has expired")
- errBearerInvalidSignature = errors.New("bearer token has invalid signature")
- errBearerInvalidContainerID = errors.New("bearer token was created for another container")
- errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
- errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
-)
-
-type CheckPrm struct {
- // Request is an APE-request that is checked by policy engine.
- Request aperequest.Request
-
- Namespace string
-
- Container cid.ID
-
- // An encoded container's owner user ID.
- ContainerOwner user.ID
-
- // PublicKey is public key of the request sender.
- PublicKey *keys.PublicKey
-
- // The request's bearer token. It is used in order to check APE overrides with the token.
- BearerToken *bearer.Token
-}
-
-// CheckCore provides methods to perform the common logic of APE check.
-type CheckCore interface {
- // CheckAPE performs the common policy-engine check logic on a prepared request.
- CheckAPE(ctx context.Context, prm CheckPrm) error
-}
-
-type checkerCoreImpl struct {
- LocalOverrideStorage policyengine.LocalOverrideStorage
- MorphChainStorage policyengine.MorphRuleChainStorageReader
- FrostFSSubjectProvider frostfsidcore.SubjectProvider
- State netmap.State
-}
-
-func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader,
- frostFSSubjectProvider frostfsidcore.SubjectProvider, state netmap.State,
-) CheckCore {
- return &checkerCoreImpl{
- LocalOverrideStorage: localOverrideStorage,
- MorphChainStorage: morphChainStorage,
- FrostFSSubjectProvider: frostFSSubjectProvider,
- State: state,
- }
-}
-
-// CheckAPE performs the common policy-engine check logic on a prepared request.
-func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error {
- var cr policyengine.ChainRouter
- if prm.BearerToken != nil {
- var err error
- if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil {
- return fmt.Errorf("bearer validation error: %w", err)
- }
- if prm.BearerToken.Impersonate() {
- cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
- } else {
- override, isSet := prm.BearerToken.APEOverride()
- if !isSet {
- return errors.New("expected for override within bearer")
- }
- cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override)
- if err != nil {
- return fmt.Errorf("create chain router error: %w", err)
- }
- }
- } else {
- cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
- }
-
- groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey)
- if err != nil {
- return fmt.Errorf("failed to get group ids: %w", err)
- }
-
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", prm.Namespace, groups[i])
- }
-
- rt := policyengine.NewRequestTargetExtended(prm.Namespace, prm.Container.EncodeToString(), fmt.Sprintf("%s:%s", prm.Namespace, prm.PublicKey.Address()), groups)
- status, found, err := cr.IsAllowed(apechain.Ingress, rt, prm.Request)
- if err != nil {
- return err
- }
- if found && status == apechain.Allow {
- return nil
- }
- return newChainRouterError(prm.Request.Operation(), status)
-}
-
-// isValidBearer checks whether bearer token was correctly signed by authorized
-// entity. This method might be defined on whole ACL service because it will
-// require fetching current epoch to check lifetime.
-func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKey *keys.PublicKey, st netmap.State) error {
- if token == nil {
- return nil
- }
-
- // First check token lifetime. Simplest verification.
- if token.InvalidAt(st.CurrentEpoch()) {
- return errBearerExpired
- }
-
- // Then check if bearer token is signed correctly.
- if !token.VerifySignature() {
- return errBearerInvalidSignature
- }
-
- // Check for ape overrides defined in the bearer token.
- if apeOverride, isSet := token.APEOverride(); isSet {
- switch apeOverride.Target.TargetType {
- case ape.TargetTypeContainer:
- var targetCnr cid.ID
- err := targetCnr.DecodeString(apeOverride.Target.Name)
- if err != nil {
- return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
- }
- if !cntID.Equals(targetCnr) {
- return errBearerInvalidContainerID
- }
- default:
- }
- }
-
- // Then check if container owner signed this token.
- if !bearer.ResolveIssuer(*token).Equals(ownerCnr) {
- return errBearerNotSignedByOwner
- }
-
- // Then check if request sender has rights to use this token.
- var usrSender user.ID
- user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
-
- if !token.AssertUser(usrSender) {
- return errBearerInvalidOwner
- }
-
- return nil
-}
diff --git a/pkg/services/common/ape/error.go b/pkg/services/common/ape/error.go
deleted file mode 100644
index d3c381de7..000000000
--- a/pkg/services/common/ape/error.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package ape
-
-import (
- "fmt"
-
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
-)
-
-// ChainRouterError is returned when chain router validation prevents
-// the APE request from being processed (no rule found, access denied, etc.).
-type ChainRouterError struct {
- operation string
- status apechain.Status
-}
-
-func (e *ChainRouterError) Error() string {
- return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.Operation(), e.Status())
-}
-
-func (e *ChainRouterError) Operation() string {
- return e.operation
-}
-
-func (e *ChainRouterError) Status() apechain.Status {
- return e.status
-}
-
-func newChainRouterError(operation string, status apechain.Status) *ChainRouterError {
- return &ChainRouterError{
- operation: operation,
- status: status,
- }
-}
diff --git a/pkg/services/container/announcement/load/controller/calls.go b/pkg/services/container/announcement/load/controller/calls.go
new file mode 100644
index 000000000..e1ed6e496
--- /dev/null
+++ b/pkg/services/container/announcement/load/controller/calls.go
@@ -0,0 +1,307 @@
+package loadcontroller
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "go.uber.org/zap"
+)
+
+// StartPrm groups the required parameters of the Controller.Start method.
+type StartPrm struct {
+ // Epoch number by which you want to select
+ // the values of the used space of containers.
+ Epoch uint64
+}
+
+type commonContext struct {
+ epoch uint64
+
+ ctrl *Controller
+
+ log *logger.Logger
+}
+
+type announcer struct {
+ commonContext
+}
+
+// Start starts the processing of container.SizeEstimation values.
+//
+// Single Start operation overtakes all data from LocalMetrics to
+// LocalAnnouncementTarget (Controller's parameters).
+// No filter by epoch is used for the iterator, since it is expected
+// that the source of metrics does not track the change of epochs.
+//
+// Each call acquires an announcement context for an Epoch parameter.
+// At the very end of the operation, the context is released.
+func (c *Controller) Start(ctx context.Context, prm StartPrm) {
+ var announcer *announcer
+ // acquire announcement
+ ctx, announcer = c.acquireAnnouncement(ctx, prm)
+ if announcer == nil {
+ return
+ }
+
+ // finally stop and free the announcement
+ defer announcer.freeAnnouncement()
+
+ // announce local values
+ announcer.announce(ctx)
+}
+
+func (c *announcer) announce(ctx context.Context) {
+ c.log.Debug(logs.ControllerStartingToAnnounceTheValuesOfTheMetrics)
+
+ var (
+ metricsIterator Iterator
+ err error
+ )
+
+ // initialize iterator over locally collected metrics
+ metricsIterator, err = c.ctrl.prm.LocalMetrics.InitIterator()
+ if err != nil {
+ c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics,
+ zap.String("error", err.Error()),
+ )
+
+ return
+ }
+
+ // initialize target of local announcements
+ targetWriter, err := c.ctrl.prm.LocalAnnouncementTarget.InitWriter(nil)
+ if err != nil {
+ c.log.Debug(logs.ControllerCouldNotInitializeAnnouncementAccumulator,
+ zap.String("error", err.Error()),
+ )
+
+ return
+ }
+
+ // iterate over all collected metrics and write them to the target
+ err = metricsIterator.Iterate(
+ func(container.SizeEstimation) bool {
+ return true // local metrics don't know about epochs
+ },
+ func(a container.SizeEstimation) error {
+ a.SetEpoch(c.epoch) // set epoch explicitly
+ return targetWriter.Put(a)
+ },
+ )
+ if err != nil {
+ c.log.Debug(logs.ControllerIteratorOverLocallyCollectedMetricsAborted,
+ zap.String("error", err.Error()),
+ )
+
+ return
+ }
+
+ // finish writing
+ err = targetWriter.Close(ctx)
+ if err != nil {
+ c.log.Debug(logs.ControllerCouldNotFinishWritingLocalAnnouncements,
+ zap.String("error", err.Error()),
+ )
+
+ return
+ }
+
+ c.log.Debug(logs.ControllerTrustAnnouncementSuccessfullyFinished)
+}
+
+func (c *Controller) acquireAnnouncement(ctx context.Context, prm StartPrm) (context.Context, *announcer) {
+ started := true
+ c.announceMtx.Lock()
+ {
+ if cancel := c.mAnnounceCtx[prm.Epoch]; cancel == nil {
+ ctx, cancel = context.WithCancel(ctx)
+ c.mAnnounceCtx[prm.Epoch] = cancel
+ started = false
+ }
+ }
+ c.announceMtx.Unlock()
+
+ log := &logger.Logger{Logger: c.opts.log.With(
+ zap.Uint64("epoch", prm.Epoch),
+ )}
+
+ if started {
+ log.Debug(logs.ControllerAnnouncementIsAlreadyStarted)
+ return ctx, nil
+ }
+
+ return ctx, &announcer{
+ commonContext: commonContext{
+ epoch: prm.Epoch,
+ ctrl: c,
+ log: log,
+ },
+ }
+}
+
+func (c *commonContext) freeAnnouncement() {
+ var stopped bool
+
+ c.ctrl.announceMtx.Lock()
+
+ {
+ var cancel context.CancelFunc
+
+ cancel, stopped = c.ctrl.mAnnounceCtx[c.epoch]
+
+ if stopped {
+ cancel()
+ delete(c.ctrl.mAnnounceCtx, c.epoch)
+ }
+ }
+
+ c.ctrl.announceMtx.Unlock()
+
+ if stopped {
+ c.log.Debug(logs.ControllerAnnouncementSuccessfullyInterrupted)
+ } else {
+ c.log.Debug(logs.ControllerAnnouncementIsNotStartedOrAlreadyInterrupted)
+ }
+}
+
+// StopPrm groups the required parameters of the Controller.Stop method.
+type StopPrm struct {
+ // Epoch number the analysis of the values of which must be interrupted.
+ Epoch uint64
+}
+
+type reporter struct {
+ commonContext
+}
+
+// Stop interrupts the processing of container.SizeEstimation values.
+//
+// Single Stop operation releases an announcement context and overtakes
+// all data from AnnouncementAccumulator to ResultReceiver (Controller's
+// parameters). Only values for the specified Epoch parameter are processed.
+//
+// Each call acquires a report context for an Epoch parameter.
+// At the very end of the operation, the context is released.
+func (c *Controller) Stop(ctx context.Context, prm StopPrm) {
+ var reporter *reporter
+ ctx, reporter = c.acquireReport(ctx, prm)
+ if reporter == nil {
+ return
+ }
+
+ // finally stop and free reporting
+ defer reporter.freeReport()
+
+ // interrupt announcement
+ reporter.freeAnnouncement()
+
+ // report the estimations
+ reporter.report(ctx)
+}
+
+func (c *Controller) acquireReport(ctx context.Context, prm StopPrm) (context.Context, *reporter) {
+ started := true
+
+ c.reportMtx.Lock()
+ {
+ if cancel := c.mReportCtx[prm.Epoch]; cancel == nil {
+ ctx, cancel = context.WithCancel(ctx)
+ c.mReportCtx[prm.Epoch] = cancel
+ started = false
+ }
+ }
+
+ c.reportMtx.Unlock()
+
+ log := &logger.Logger{Logger: c.opts.log.With(
+ zap.Uint64("epoch", prm.Epoch),
+ )}
+
+ if started {
+ log.Debug(logs.ControllerReportIsAlreadyStarted)
+ return ctx, nil
+ }
+
+ return ctx, &reporter{
+ commonContext: commonContext{
+ epoch: prm.Epoch,
+ ctrl: c,
+ log: log,
+ },
+ }
+}
+
+func (c *commonContext) freeReport() {
+ var stopped bool
+
+ c.ctrl.reportMtx.Lock()
+
+ {
+ var cancel context.CancelFunc
+
+ cancel, stopped = c.ctrl.mReportCtx[c.epoch]
+
+ if stopped {
+ cancel()
+ delete(c.ctrl.mReportCtx, c.epoch)
+ }
+ }
+
+ c.ctrl.reportMtx.Unlock()
+
+ if stopped {
+ c.log.Debug(logs.ControllerAnnouncementSuccessfullyInterrupted)
+ } else {
+ c.log.Debug(logs.ControllerAnnouncementIsNotStartedOrAlreadyInterrupted)
+ }
+}
+
+func (c *reporter) report(ctx context.Context) {
+ var (
+ localIterator Iterator
+ err error
+ )
+
+ // initialize iterator over locally accumulated announcements
+ localIterator, err = c.ctrl.prm.AnnouncementAccumulator.InitIterator()
+ if err != nil {
+ c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements,
+ zap.String("error", err.Error()),
+ )
+
+ return
+ }
+
+ // initialize final destination of load estimations
+ resultWriter, err := c.ctrl.prm.ResultReceiver.InitWriter(nil)
+ if err != nil {
+ c.log.Debug(logs.ControllerCouldNotInitializeResultTarget,
+ zap.String("error", err.Error()),
+ )
+
+ return
+ }
+
+ // iterate over all accumulated announcements and write them to the target
+ err = localIterator.Iterate(
+ usedSpaceFilterEpochEQ(c.epoch),
+ resultWriter.Put,
+ )
+ if err != nil {
+ c.log.Debug(logs.ControllerIteratorOverLocalAnnouncementsAborted,
+ zap.String("error", err.Error()),
+ )
+
+ return
+ }
+
+ // finish writing
+ err = resultWriter.Close(ctx)
+ if err != nil {
+ c.log.Debug(logs.ControllerCouldNotFinishWritingLoadEstimations,
+ zap.String("error", err.Error()),
+ )
+ }
+}
diff --git a/pkg/services/container/announcement/load/controller/calls_test.go b/pkg/services/container/announcement/load/controller/calls_test.go
new file mode 100644
index 000000000..6ca24e869
--- /dev/null
+++ b/pkg/services/container/announcement/load/controller/calls_test.go
@@ -0,0 +1,192 @@
+package loadcontroller_test
+
+import (
+ "context"
+ "math/rand"
+ "sync"
+ "testing"
+
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "github.com/stretchr/testify/require"
+)
+
+type testAnnouncementStorage struct {
+ w loadcontroller.Writer
+
+ i loadcontroller.Iterator
+
+ mtx sync.RWMutex
+
+ m map[uint64][]container.SizeEstimation
+}
+
+func newTestStorage() *testAnnouncementStorage {
+ return &testAnnouncementStorage{
+ m: make(map[uint64][]container.SizeEstimation),
+ }
+}
+
+func (s *testAnnouncementStorage) InitIterator() (loadcontroller.Iterator, error) {
+ if s.i != nil {
+ return s.i, nil
+ }
+
+ return s, nil
+}
+
+func (s *testAnnouncementStorage) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontroller.UsedSpaceHandler) error {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+
+ for _, v := range s.m {
+ for _, a := range v {
+ if f(a) {
+ if err := h(a); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (s *testAnnouncementStorage) InitWriter([]loadcontroller.ServerInfo) (loadcontroller.Writer, error) {
+ if s.w != nil {
+ return s.w, nil
+ }
+
+ return s, nil
+}
+
+func (s *testAnnouncementStorage) Put(v container.SizeEstimation) error {
+ s.mtx.Lock()
+ s.m[v.Epoch()] = append(s.m[v.Epoch()], v)
+ s.mtx.Unlock()
+
+ return nil
+}
+
+func (s *testAnnouncementStorage) Close(context.Context) error {
+ return nil
+}
+
+func randAnnouncement() (a container.SizeEstimation) {
+ a.SetContainer(cidtest.ID())
+ a.SetValue(rand.Uint64())
+
+ return
+}
+
+func TestSimpleScenario(t *testing.T) {
+ // create storage to write final estimations
+ resultStorage := newTestStorage()
+
+ // create storages to accumulate announcements
+ accumulatingStorageN2 := newTestStorage()
+
+ // create storage of local metrics
+ localStorageN1 := newTestStorage()
+ localStorageN2 := newTestStorage()
+
+ // create 2 controllers: 1st writes announcements to 2nd, 2nd directly to final destination
+ ctrlN1 := loadcontroller.New(loadcontroller.Prm{
+ LocalMetrics: localStorageN1,
+ AnnouncementAccumulator: newTestStorage(),
+ LocalAnnouncementTarget: &testAnnouncementStorage{
+ w: accumulatingStorageN2,
+ },
+ ResultReceiver: resultStorage,
+ })
+
+ ctrlN2 := loadcontroller.New(loadcontroller.Prm{
+ LocalMetrics: localStorageN2,
+ AnnouncementAccumulator: accumulatingStorageN2,
+ LocalAnnouncementTarget: &testAnnouncementStorage{
+ w: resultStorage,
+ },
+ ResultReceiver: resultStorage,
+ })
+
+ const processEpoch uint64 = 10
+
+ const goodNum = 4
+
+ // create several random values, all assigned to the processing epoch
+ announces := make([]container.SizeEstimation, 0, goodNum)
+
+ for i := 0; i < goodNum; i++ {
+ a := randAnnouncement()
+ a.SetEpoch(processEpoch)
+
+ announces = append(announces, a)
+ }
+
+ // store one half of the announcements in the 1st metrics storage
+ // and the other half in the 2nd
+ for i := 0; i < goodNum/2; i++ {
+ require.NoError(t, localStorageN1.Put(announces[i]))
+ }
+
+ for i := goodNum / 2; i < goodNum; i++ {
+ require.NoError(t, localStorageN2.Put(announces[i]))
+ }
+
+ wg := new(sync.WaitGroup)
+ wg.Add(2)
+
+ startPrm := loadcontroller.StartPrm{
+ Epoch: processEpoch,
+ }
+
+ // start both controllers
+ go func() {
+ ctrlN1.Start(context.Background(), startPrm)
+ wg.Done()
+ }()
+
+ go func() {
+ ctrlN2.Start(context.Background(), startPrm)
+ wg.Done()
+ }()
+
+ wg.Wait()
+ wg.Add(2)
+
+ stopPrm := loadcontroller.StopPrm{
+ Epoch: processEpoch,
+ }
+
+ // stop both controllers
+ go func() {
+ ctrlN1.Stop(context.Background(), stopPrm)
+ wg.Done()
+ }()
+
+ go func() {
+ ctrlN2.Stop(context.Background(), stopPrm)
+ wg.Done()
+ }()
+
+ wg.Wait()
+
+ // result target should contain all announcements for the processing epoch
+ var res []container.SizeEstimation
+
+ err := resultStorage.Iterate(
+ func(a container.SizeEstimation) bool {
+ return true
+ },
+ func(a container.SizeEstimation) error {
+ res = append(res, a)
+ return nil
+ },
+ )
+ require.NoError(t, err)
+
+ for i := range announces {
+ require.Contains(t, res, announces[i])
+ }
+}
diff --git a/pkg/services/container/announcement/load/controller/controller.go b/pkg/services/container/announcement/load/controller/controller.go
new file mode 100644
index 000000000..ef6dbade7
--- /dev/null
+++ b/pkg/services/container/announcement/load/controller/controller.go
@@ -0,0 +1,94 @@
+package loadcontroller
+
+import (
+ "context"
+ "fmt"
+ "sync"
+)
+
+// Prm groups the required parameters of the Controller's constructor.
+//
+// All values must comply with the requirements imposed on them.
+// Passing incorrect parameter values will result in constructor
+// failure (error or panic depending on the implementation).
+type Prm struct {
+ // Iterator over the used space values of the containers
+ // collected by the node locally.
+ LocalMetrics IteratorProvider
+
+ // Place of recording the local values of
+ // the used space of containers.
+ LocalAnnouncementTarget WriterProvider
+
+ // Iterator over the summarized used space scores
+ // from the various network participants.
+ AnnouncementAccumulator IteratorProvider
+
+ // Place of recording the final estimates of
+ // the used space of containers.
+ ResultReceiver WriterProvider
+}
+
+// Controller represents main handler for starting
+// and interrupting container volume estimation.
+//
+// It binds the interfaces of the local value stores
+// to the target storage points. Controller is abstracted
+// from the internal storage device and the network location
+// of the connecting components. At its core, it is a
+// high-level start-stop trigger for calculations.
+//
+// For correct operation, the controller must be created
+// using the constructor (New) based on the required parameters
+// and optional components. After successful creation,
+// the controller is immediately ready to work through
+// API of external control of calculations and data transfer.
+type Controller struct {
+ prm Prm
+
+ opts *options
+
+ announceMtx sync.Mutex
+ mAnnounceCtx map[uint64]context.CancelFunc
+
+ reportMtx sync.Mutex
+ mReportCtx map[uint64]context.CancelFunc
+}
+
+const invalidPrmValFmt = "invalid parameter %s (%T):%v"
+
+func panicOnPrmValue(n string, v any) {
+ panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
+}
+
+// New creates a new instance of the Controller.
+//
+// Panics if at least one value of the parameters is invalid.
+//
+// The created Controller does not require additional
+// initialization and is completely ready for work.
+func New(prm Prm, opts ...Option) *Controller {
+ switch {
+ case prm.LocalMetrics == nil:
+ panicOnPrmValue("LocalMetrics", prm.LocalMetrics)
+ case prm.AnnouncementAccumulator == nil:
+ panicOnPrmValue("AnnouncementAccumulator", prm.AnnouncementAccumulator)
+ case prm.LocalAnnouncementTarget == nil:
+ panicOnPrmValue("LocalAnnouncementTarget", prm.LocalAnnouncementTarget)
+ case prm.ResultReceiver == nil:
+ panicOnPrmValue("ResultReceiver", prm.ResultReceiver)
+ }
+
+ o := defaultOpts()
+
+ for _, opt := range opts {
+ opt(o)
+ }
+
+ return &Controller{
+ prm: prm,
+ opts: o,
+ mAnnounceCtx: make(map[uint64]context.CancelFunc),
+ mReportCtx: make(map[uint64]context.CancelFunc),
+ }
+}
diff --git a/pkg/services/container/announcement/load/controller/deps.go b/pkg/services/container/announcement/load/controller/deps.go
new file mode 100644
index 000000000..99da8594f
--- /dev/null
+++ b/pkg/services/container/announcement/load/controller/deps.go
@@ -0,0 +1,103 @@
+package loadcontroller
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+)
+
+// UsedSpaceHandler describes the signature of the container.SizeEstimation
+// value handling function.
+//
+// Termination of processing without failures is usually signaled
+// with a zero error, while a specific value may describe the reason
+// for failure.
+type UsedSpaceHandler func(container.SizeEstimation) error
+
+// UsedSpaceFilter describes the signature of the function for
+// checking whether a value meets a certain criterion.
+//
+// Return of true means conformity, false - vice versa.
+type UsedSpaceFilter func(container.SizeEstimation) bool
+
+// Iterator is a group of methods provided by entity
+// which can iterate over a group of container.SizeEstimation values.
+type Iterator interface {
+ // Iterate must start an iterator over values that
+ // meet the filter criterion (returns true).
+ // For each such value should call a handler, the error
+ // of which should be directly returned from the method.
+ //
+ // Internal failures of the iterator are also signaled via
+ // an error. After a successful call to the last value
+ // handler, nil should be returned.
+ Iterate(UsedSpaceFilter, UsedSpaceHandler) error
+}
+
+// IteratorProvider is a group of methods provided
+// by entity which generates iterators over
+// container.SizeEstimation values.
+type IteratorProvider interface {
+ // InitIterator should return an initialized Iterator.
+ //
+ // Initialization problems are reported via error.
+ // If no error was returned, then the Iterator must not be nil.
+ //
+ // Implementations can have different logic for different
+ // contexts, so specific ones may document their own behavior.
+ InitIterator() (Iterator, error)
+}
+
+// Writer describes the interface for storing container.SizeEstimation values.
+//
+// This interface is provided by both local storage
+// of values and remote (wrappers over the RPC).
+type Writer interface {
+ // Put performs a write operation of container.SizeEstimation value
+ // and returns any error encountered.
+ //
+ // All values after the Close call must be flushed to the
+ // physical target. Implementations can cache values before
+ // Close operation.
+ //
+ // Put must not be called after Close.
+ Put(container.SizeEstimation) error
+
+ // Close exits with method-providing Writer.
+ //
+ // All cached values must be flushed before
+ // the Close's return.
+ //
+ // Methods must not be called after Close.
+ Close(ctx context.Context) error
+}
+
+// WriterProvider is a group of methods provided
+// by entity which generates keepers of
+// container.SizeEstimation values.
+type WriterProvider interface {
+ // InitWriter should return an initialized Writer.
+ //
+ // Initialization problems are reported via error.
+ // If no error was returned, then the Writer must not be nil.
+ InitWriter(route []ServerInfo) (Writer, error)
+}
+
+// ServerInfo describes a set of
+// characteristics of a point in a route.
+type ServerInfo interface {
+ // PublicKey returns public key of the node
+ // from the route in a binary representation.
+ PublicKey() []byte
+
+ // Iterates over network addresses of the node
+ // in the route. Breaks iterating on true return
+ // of the handler.
+ IterateAddresses(func(string) bool)
+
+ // Returns number of server's network addresses.
+ NumberOfAddresses() int
+
+ // ExternalAddresses returns external node's addresses.
+ ExternalAddresses() []string
+}
diff --git a/pkg/services/container/announcement/load/controller/opts.go b/pkg/services/container/announcement/load/controller/opts.go
new file mode 100644
index 000000000..29148def0
--- /dev/null
+++ b/pkg/services/container/announcement/load/controller/opts.go
@@ -0,0 +1,28 @@
+package loadcontroller
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
+)
+
+// Option sets an optional parameter of Controller.
+type Option func(*options)
+
+type options struct {
+ log *logger.Logger
+}
+
+func defaultOpts() *options {
+ return &options{
+ log: &logger.Logger{Logger: zap.L()},
+ }
+}
+
+// WithLogger returns option to specify logging component.
+func WithLogger(l *logger.Logger) Option {
+ return func(o *options) {
+ if l != nil {
+ o.log = l
+ }
+ }
+}
diff --git a/pkg/services/container/announcement/load/controller/util.go b/pkg/services/container/announcement/load/controller/util.go
new file mode 100644
index 000000000..223de13ba
--- /dev/null
+++ b/pkg/services/container/announcement/load/controller/util.go
@@ -0,0 +1,36 @@
+package loadcontroller
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+)
+
+func usedSpaceFilterEpochEQ(epoch uint64) UsedSpaceFilter {
+ return func(a container.SizeEstimation) bool {
+ return a.Epoch() == epoch
+ }
+}
+
+type storageWrapper struct {
+ w Writer
+ i Iterator
+}
+
+func (s storageWrapper) InitIterator() (Iterator, error) {
+ return s.i, nil
+}
+
+func (s storageWrapper) InitWriter([]ServerInfo) (Writer, error) {
+ return s.w, nil
+}
+
+func SimpleIteratorProvider(i Iterator) IteratorProvider {
+ return &storageWrapper{
+ i: i,
+ }
+}
+
+func SimpleWriterProvider(w Writer) WriterProvider {
+ return &storageWrapper{
+ w: w,
+ }
+}
diff --git a/pkg/services/container/announcement/load/route/calls.go b/pkg/services/container/announcement/load/route/calls.go
new file mode 100644
index 000000000..9a483aed0
--- /dev/null
+++ b/pkg/services/container/announcement/load/route/calls.go
@@ -0,0 +1,145 @@
+package loadroute
+
+import (
+ "context"
+ "encoding/hex"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "go.uber.org/zap"
+)
+
+// InitWriter initializes and returns Writer that sends each value to its next route point.
+//
+// If route is present, then it is taken into account,
+// and the value will be sent to its continuation. Otherwise, the route will be laid
+// from scratch and the value will be sent to its primary point.
+//
+// After building a list of remote points of the next leg of the route, the value is sent
+// sequentially to all of them. If any transmissions (even all) fail, an error will not
+// be returned.
+//
+// Close of the composed Writer calls Close method on each internal Writer generated in
+// runtime and never returns an error.
+//
+// Always returns nil error.
+func (r *Router) InitWriter(route []loadcontroller.ServerInfo) (loadcontroller.Writer, error) {
+ if len(route) == 0 {
+ route = []loadcontroller.ServerInfo{r.localSrvInfo}
+ }
+
+ return &loadWriter{
+ router: r,
+ route: route,
+ mRoute: make(map[routeKey]*valuesRoute),
+ mServers: make(map[string]loadcontroller.Writer),
+ }, nil
+}
+
+type routeKey struct {
+ epoch uint64
+
+ cid string
+}
+
+type valuesRoute struct {
+ route []loadcontroller.ServerInfo
+
+ values []container.SizeEstimation
+}
+
+type loadWriter struct {
+ router *Router
+
+ route []loadcontroller.ServerInfo
+
+ routeMtx sync.RWMutex
+ mRoute map[routeKey]*valuesRoute
+
+ mServers map[string]loadcontroller.Writer
+}
+
+func (w *loadWriter) Put(a container.SizeEstimation) error {
+ w.routeMtx.Lock()
+ defer w.routeMtx.Unlock()
+
+ key := routeKey{
+ epoch: a.Epoch(),
+ cid: a.Container().EncodeToString(),
+ }
+
+ routeValues, ok := w.mRoute[key]
+ if !ok {
+ route, err := w.router.routeBuilder.NextStage(a, w.route)
+ if err != nil {
+ return err
+ } else if len(route) == 0 {
+ route = []loadcontroller.ServerInfo{nil}
+ }
+
+ routeValues = &valuesRoute{
+ route: route,
+ values: []container.SizeEstimation{a},
+ }
+
+ w.mRoute[key] = routeValues
+ }
+
+ for _, remoteInfo := range routeValues.route {
+ var key string
+
+ if remoteInfo != nil {
+ key = hex.EncodeToString(remoteInfo.PublicKey())
+ }
+
+ remoteWriter, ok := w.mServers[key]
+ if !ok {
+ provider, err := w.router.remoteProvider.InitRemote(remoteInfo)
+ if err != nil {
+ w.router.log.Debug(logs.RouteCouldNotInitializeWriterProvider,
+ zap.String("error", err.Error()),
+ )
+
+ continue // best effort
+ }
+
+ remoteWriter, err = provider.InitWriter(w.route)
+ if err != nil {
+ w.router.log.Debug(logs.RouteCouldNotInitializeWriter,
+ zap.String("error", err.Error()),
+ )
+
+ continue // best effort
+ }
+
+ w.mServers[key] = remoteWriter
+ }
+
+ err := remoteWriter.Put(a)
+ if err != nil {
+ w.router.log.Debug(logs.RouteCouldNotPutTheValue,
+ zap.String("error", err.Error()),
+ )
+ }
+
+ // continue best effort
+ }
+
+ return nil
+}
+
+func (w *loadWriter) Close(ctx context.Context) error {
+ for key, wRemote := range w.mServers {
+ err := wRemote.Close(ctx)
+ if err != nil {
+ w.router.log.Debug(logs.RouteCouldNotCloseRemoteServerWriter,
+ zap.String("key", key),
+ zap.String("error", err.Error()),
+ )
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/services/container/announcement/load/route/deps.go b/pkg/services/container/announcement/load/route/deps.go
new file mode 100644
index 000000000..b255900f7
--- /dev/null
+++ b/pkg/services/container/announcement/load/route/deps.go
@@ -0,0 +1,31 @@
+package loadroute
+
+import (
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+)
+
// Builder groups methods to route values in the network.
type Builder interface {
	// NextStage must return the next group of route points for the value a
	// based on the passed route.
	//
	// An empty passed list means being at the starting point of the route.
	//
	// Must return an empty list and no error if the endpoint of the route is reached.
	// If there is more than one point to go and the last passed point is included
	// in that list (meaning that point is the last point in one of the route groups),
	// the returned route must contain a nil point that should be interpreted as a signal
	// to, among sending to other route points, save the announcement in that point.
	NextStage(a container.SizeEstimation, passed []loadcontroller.ServerInfo) ([]loadcontroller.ServerInfo, error)
}
+
// RemoteWriterProvider describes the component
// for sending values to a fixed route point.
type RemoteWriterProvider interface {
	// InitRemote must return a WriterProvider to the route point
	// corresponding to info.
	//
	// A nil info matches the end of the route (the local node's storage).
	InitRemote(info loadcontroller.ServerInfo) (loadcontroller.WriterProvider, error)
}
diff --git a/pkg/services/container/announcement/load/route/opts.go b/pkg/services/container/announcement/load/route/opts.go
new file mode 100644
index 000000000..ab140ab4c
--- /dev/null
+++ b/pkg/services/container/announcement/load/route/opts.go
@@ -0,0 +1,28 @@
+package loadroute
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
+)
+
// Option sets an optional parameter of Router.
type Option func(*options)

// options accumulates the optional parameters of Router.
type options struct {
	// log is the logging component; never nil after defaultOpts.
	log *logger.Logger
}

// defaultOpts returns options pre-filled with defaults:
// logging goes to the global zap logger.
func defaultOpts() *options {
	return &options{
		log: &logger.Logger{Logger: zap.L()},
	}
}
+
+// WithLogger returns Option to specify logging component.
+func WithLogger(l *logger.Logger) Option {
+ return func(o *options) {
+ if l != nil {
+ o.log = l
+ }
+ }
+}
diff --git a/pkg/services/container/announcement/load/route/placement/builder.go b/pkg/services/container/announcement/load/route/placement/builder.go
new file mode 100644
index 000000000..493b89723
--- /dev/null
+++ b/pkg/services/container/announcement/load/route/placement/builder.go
@@ -0,0 +1,49 @@
+package placementrouter
+
+import "fmt"
+
// Prm groups the required parameters of the Builder's constructor.
//
// All values must comply with the requirements imposed on them.
// Passing incorrect parameter values will result in constructor
// failure (error or panic depending on the implementation).
type Prm struct {
	// PlacementBuilder calculates the container members
	// (storage node vectors per epoch and container).
	//
	// Must not be nil.
	PlacementBuilder PlacementBuilder
}
+
// Builder represents component that routes used container space
// values between nodes from the container.
//
// For correct operation, Builder must be created using
// the constructor (New) based on the required parameters
// and optional components. After successful creation,
// the Builder is immediately ready to work through API.
type Builder struct {
	// placementBuilder composes the container node vectors
	// for a given (epoch, container) pair.
	placementBuilder PlacementBuilder
}
+
// invalidPrmValFmt formats the constructor panic message:
// parameter name, its dynamic type and its value.
const invalidPrmValFmt = "invalid parameter %s (%T):%v"

// panicOnPrmValue aborts construction with a message that
// names the invalid parameter n and shows its value v.
func panicOnPrmValue(n string, v any) {
	msg := fmt.Sprintf(invalidPrmValFmt, n, v, v)
	panic(msg)
}
+
+// New creates a new instance of the Builder.
+//
+// Panics if at least one value of the parameters is invalid.
+//
+// The created Builder does not require additional
+// initialization and is completely ready for work.
+func New(prm Prm) *Builder {
+ switch {
+ case prm.PlacementBuilder == nil:
+ panicOnPrmValue("PlacementBuilder", prm.PlacementBuilder)
+ }
+
+ return &Builder{
+ placementBuilder: prm.PlacementBuilder,
+ }
+}
diff --git a/pkg/services/container/announcement/load/route/placement/calls.go b/pkg/services/container/announcement/load/route/placement/calls.go
new file mode 100644
index 000000000..68bdb43a7
--- /dev/null
+++ b/pkg/services/container/announcement/load/route/placement/calls.go
@@ -0,0 +1,47 @@
+package placementrouter
+
+import (
+ "bytes"
+ "fmt"
+
+ netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+)
+
+// NextStage composes container nodes for the container and epoch from a,
+// and returns the list of nodes with maximum weight (one from each vector).
+//
+// If passed route has more than one point, then endpoint of the route is reached.
+//
+// The traversed route is not checked, it is assumed to be correct.
+func (b *Builder) NextStage(a container.SizeEstimation, passed []loadcontroller.ServerInfo) ([]loadcontroller.ServerInfo, error) {
+ if len(passed) > 1 {
+ return nil, nil
+ }
+
+ cnr := a.Container()
+
+ placement, err := b.placementBuilder.BuildPlacement(a.Epoch(), cnr)
+ if err != nil {
+ return nil, fmt.Errorf("could not build placement %s: %w", cnr, err)
+ }
+
+ res := make([]loadcontroller.ServerInfo, 0, len(placement))
+
+ for i := range placement {
+ if len(placement[i]) == 0 {
+ continue
+ }
+
+ if len(passed) == 1 && bytes.Equal(passed[0].PublicKey(), placement[i][0].PublicKey()) {
+ // add nil element so the announcement will be saved in local memory
+ res = append(res, nil)
+ } else {
+ // add element with remote node to send announcement to
+ res = append(res, netmapcore.Node(placement[i][0]))
+ }
+ }
+
+ return res, nil
+}
diff --git a/pkg/services/container/announcement/load/route/placement/deps.go b/pkg/services/container/announcement/load/route/placement/deps.go
new file mode 100644
index 000000000..43339eb47
--- /dev/null
+++ b/pkg/services/container/announcement/load/route/placement/deps.go
@@ -0,0 +1,14 @@
+package placementrouter
+
+import (
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
// PlacementBuilder describes interface of FrostFS placement calculator.
type PlacementBuilder interface {
	// BuildPlacement must compose and sort (according to a specific algorithm)
	// storage nodes from the container by its identifier using the network map
	// of the particular epoch.
	BuildPlacement(epoch uint64, cnr cid.ID) ([][]netmap.NodeInfo, error)
}
diff --git a/pkg/services/container/announcement/load/route/router.go b/pkg/services/container/announcement/load/route/router.go
new file mode 100644
index 000000000..c8f784b16
--- /dev/null
+++ b/pkg/services/container/announcement/load/route/router.go
@@ -0,0 +1,87 @@
+package loadroute
+
+import (
+ "fmt"
+
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+)
+
// Prm groups the required parameters of the Router's constructor.
//
// All values must comply with the requirements imposed on them.
// Passing incorrect parameter values will result in constructor
// failure (error or panic depending on the implementation).
type Prm struct {
	// LocalServerInfo describes the characteristics
	// of the local node's server.
	//
	// Must not be nil.
	LocalServerInfo loadcontroller.ServerInfo

	// RemoteWriterProvider is the component for sending
	// values to a fixed route point.
	//
	// Must not be nil.
	RemoteWriterProvider RemoteWriterProvider

	// Builder is the route planner.
	//
	// Must not be nil.
	Builder Builder
}
+
// Router represents component responsible for routing
// used container space values over the network.
//
// For each fixed pair (container ID, epoch) there is a
// single value route on the network. Router provides the
// interface for writing values to the next point of the route.
//
// For correct operation, Router must be created using
// the constructor (New) based on the required parameters
// and optional components. After successful creation,
// the Router is immediately ready to work through API.
type Router struct {
	// log is the logging component (defaults to the global zap logger).
	log *logger.Logger

	// remoteProvider initializes writers to remote route points.
	remoteProvider RemoteWriterProvider

	// routeBuilder plans the value routes.
	routeBuilder Builder

	// localSrvInfo describes the local node's server.
	localSrvInfo loadcontroller.ServerInfo
}
+
// invalidPrmValFmt formats the constructor panic message:
// parameter name, its dynamic type and its value.
const invalidPrmValFmt = "invalid parameter %s (%T):%v"

// panicOnPrmValue aborts construction with a message that
// names the invalid parameter n and shows its value v.
func panicOnPrmValue(n string, v any) {
	msg := fmt.Sprintf(invalidPrmValFmt, n, v, v)
	panic(msg)
}
+
+// New creates a new instance of the Router.
+//
+// Panics if at least one value of the parameters is invalid.
+//
+// The created Router does not require additional
+// initialization and is completely ready for work.
+func New(prm Prm, opts ...Option) *Router {
+ switch {
+ case prm.RemoteWriterProvider == nil:
+ panicOnPrmValue("RemoteWriterProvider", prm.RemoteWriterProvider)
+ case prm.Builder == nil:
+ panicOnPrmValue("Builder", prm.Builder)
+ case prm.LocalServerInfo == nil:
+ panicOnPrmValue("LocalServerInfo", prm.LocalServerInfo)
+ }
+
+ o := defaultOpts()
+
+ for i := range opts {
+ opts[i](o)
+ }
+
+ return &Router{
+ log: o.log,
+ remoteProvider: prm.RemoteWriterProvider,
+ routeBuilder: prm.Builder,
+ localSrvInfo: prm.LocalServerInfo,
+ }
+}
diff --git a/pkg/services/container/announcement/load/route/util.go b/pkg/services/container/announcement/load/route/util.go
new file mode 100644
index 000000000..ea0f51aad
--- /dev/null
+++ b/pkg/services/container/announcement/load/route/util.go
@@ -0,0 +1,49 @@
+package loadroute
+
+import (
+ "bytes"
+ "errors"
+
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+)
+
+var errWrongRoute = errors.New("wrong route")
+
+// CheckRoute checks if the route is a route correctly constructed by the builder for value a.
+//
+// Returns nil if route is correct, otherwise an error clarifying the inconsistency.
+func CheckRoute(builder Builder, a container.SizeEstimation, route []loadcontroller.ServerInfo) error {
+ for i := 1; i < len(route); i++ {
+ servers, err := builder.NextStage(a, route[:i])
+ if err != nil {
+ return err
+ } else if len(servers) == 0 {
+ break
+ }
+
+ found := false
+
+ for j := range servers {
+ if servers[j] == nil {
+ // nil route point means that
+ // (i-1)-th node in the route
+ // must, among other things,
+ // save the announcement to its
+ // local memory
+ continue
+ }
+
+ if bytes.Equal(servers[j].PublicKey(), route[i].PublicKey()) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return errWrongRoute
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/services/container/announcement/load/storage/storage.go b/pkg/services/container/announcement/load/storage/storage.go
new file mode 100644
index 000000000..4d3104c76
--- /dev/null
+++ b/pkg/services/container/announcement/load/storage/storage.go
@@ -0,0 +1,151 @@
+package loadstorage
+
+import (
+ "context"
+ "sort"
+ "sync"
+
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+)
+
// usedSpaceEstimations accumulates the used-space values reported
// for a single (epoch, container) key.
type usedSpaceEstimations struct {
	// announcement keeps a sample estimation message for the key;
	// it carries the epoch and container ID used when reporting.
	announcement container.SizeEstimation

	// sizes lists every reported used-space value for the key.
	sizes []uint64
}

// storageKey identifies an estimation list by epoch and container ID.
type storageKey struct {
	epoch uint64

	cid string
}
+
// Storage represents in-memory storage of
// container.SizeEstimation values.
//
// The write operation has the usual behavior - to save
// the next number of used container space for a specific epoch.
// All values related to one key (epoch, container ID) are stored
// as a list.
//
// Storage also provides an iterator interface, into the handler
// of which the final score is passed, built on all values saved
// at the time of the call. Currently the only possible estimation
// formula is used - the average between 10th and 90th percentile.
//
// For correct operation, Storage must be created
// using the constructor (New) based on the required parameters
// and optional components. After successful creation,
// Storage is immediately ready to work through API.
type Storage struct {
	// mtx guards mItems.
	mtx sync.RWMutex

	// mItems maps (epoch, container) to the accumulated estimations.
	mItems map[storageKey]*usedSpaceEstimations
}
+
// Prm groups the required parameters of the Storage's constructor.
//
// The component is not parameterizable at the moment,
// so the struct is empty and reserved for future options.
type Prm struct{}
+
+// New creates a new instance of the Storage.
+//
+// The created Storage does not require additional
+// initialization and is completely ready for work.
+func New(_ Prm) *Storage {
+ return &Storage{
+ mItems: make(map[storageKey]*usedSpaceEstimations),
+ }
+}
+
+// Put appends the next value of the occupied container space for the epoch
+// to the list of already saved values.
+//
+// Always returns nil error.
+func (s *Storage) Put(a container.SizeEstimation) error {
+ s.mtx.Lock()
+
+ {
+ key := storageKey{
+ epoch: a.Epoch(),
+ cid: a.Container().EncodeToString(),
+ }
+
+ estimations, ok := s.mItems[key]
+ if !ok {
+ estimations = &usedSpaceEstimations{
+ announcement: a,
+ sizes: make([]uint64, 0, 1),
+ }
+
+ s.mItems[key] = estimations
+ }
+
+ estimations.sizes = append(estimations.sizes, a.Value())
+ }
+
+ s.mtx.Unlock()
+
+ return nil
+}
+
// Close is a no-op: the in-memory storage holds no resources
// that need to be released. Always returns nil.
func (s *Storage) Close(context.Context) error {
	return nil
}
+
+// Iterate goes through all the lists with the key (container ID, epoch),
+// calculates the final grade for all values, and passes it to the handler.
+//
+// Final grade is the average between 10th and 90th percentiles.
+func (s *Storage) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontroller.UsedSpaceHandler) (err error) {
+ s.mtx.RLock()
+
+ {
+ for _, v := range s.mItems {
+ if f(v.announcement) {
+ // calculate estimation based on 90th percentile
+ v.announcement.SetValue(finalEstimation(v.sizes))
+
+ if err = h(v.announcement); err != nil {
+ break
+ }
+ }
+ }
+ }
+
+ s.mtx.RUnlock()
+
+ return
+}
+
// finalEstimation aggregates the reported values into a single estimation:
// the mean of the values lying between the 10th and 90th percentiles
// (tails trimmed only when at least 10 samples are present; otherwise all
// values are averaged). The input slice is sorted in place. An empty input
// yields 0 instead of panicking with a division by zero.
func finalEstimation(vals []uint64) uint64 {
	if len(vals) == 0 {
		// guard: the original code divided by len(vals) unconditionally
		return 0
	}

	sort.Slice(vals, func(i, j int) bool {
		return vals[i] < vals[j]
	})

	const (
		lowerRank = 10
		upperRank = 90
	)

	if len(vals) >= lowerRank {
		// trim the tails: keep the half-open range [P10, P90)
		vals = vals[percentile(lowerRank, vals):percentile(upperRank, vals)]
	}

	var sum uint64
	for i := range vals {
		sum += vals[i]
	}

	return sum / uint64(len(vals))
}

// percentile returns the index of the rank-th percentile (rank in [0, 100])
// within the sorted slice vals, using the floor (nearest-rank) method.
func percentile(rank int, vals []uint64) int {
	return len(vals) * rank / 100
}
diff --git a/pkg/services/container/announcement/load/storage/storage_test.go b/pkg/services/container/announcement/load/storage/storage_test.go
new file mode 100644
index 000000000..20e73627d
--- /dev/null
+++ b/pkg/services/container/announcement/load/storage/storage_test.go
@@ -0,0 +1,50 @@
+package loadstorage
+
+import (
+ "math/rand"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "github.com/stretchr/testify/require"
+)
+
// TestStorage verifies that Storage accumulates every value written for a
// single (epoch, container) key and that Iterate reports exactly one
// aggregated estimation whose value equals finalEstimation over all
// written values.
func TestStorage(t *testing.T) {
	const epoch uint64 = 13

	// one announcement key: fixed container ID and epoch
	var a container.SizeEstimation
	a.SetContainer(cidtest.ID())
	a.SetEpoch(epoch)

	const opinionsNum = 100

	s := New(Prm{})

	// write opinionsNum random values under the same key
	opinions := make([]uint64, opinionsNum)
	for i := range opinions {
		opinions[i] = rand.Uint64()

		a.SetValue(opinions[i])

		require.NoError(t, s.Put(a))
	}

	iterCounter := 0

	err := s.Iterate(
		// filter: accept only the epoch written above
		func(ai container.SizeEstimation) bool {
			return ai.Epoch() == epoch
		},
		// handler: must run exactly once with the aggregated value
		func(ai container.SizeEstimation) error {
			iterCounter++

			require.Equal(t, epoch, ai.Epoch())
			require.Equal(t, a.Container(), ai.Container())
			require.Equal(t, finalEstimation(opinions), ai.Value())

			return nil
		},
	)
	require.NoError(t, err)
	require.Equal(t, 1, iterCounter)
}
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
deleted file mode 100644
index 01bd825d7..000000000
--- a/pkg/services/container/ape.go
+++ /dev/null
@@ -1,735 +0,0 @@
-package container
-
-import (
- "bytes"
- "context"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/sha256"
- "encoding/hex"
- "errors"
- "fmt"
- "net"
- "strings"
-
- aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "google.golang.org/grpc/peer"
-)
-
-var (
- errMissingContainerID = errors.New("missing container ID")
- errSessionContainerMissmatch = errors.New("requested container is not related to the session")
- errMissingVerificationHeader = errors.New("malformed request: empty verification header")
- errInvalidSessionTokenSignature = errors.New("malformed request: invalid session token signature")
- errInvalidSessionTokenOwner = errors.New("malformed request: invalid session token owner")
- errEmptyBodySignature = errors.New("malformed request: empty body signature")
- errMissingOwnerID = errors.New("malformed request: missing owner ID")
- errOwnerIDIsNotSet = errors.New("owner id is not set")
- errInvalidDomainZone = errors.New("invalid domain zone: no namespace is expected")
-
- undefinedContainerID = cid.ID{}
-)
-
-type ir interface {
- InnerRingKeys(ctx context.Context) ([][]byte, error)
-}
-
-type containers interface {
- Get(context.Context, cid.ID) (*containercore.Container, error)
-}
-
-type apeChecker struct {
- router policyengine.ChainRouter
- reader containers
- ir ir
- nm netmap.Source
-
- frostFSIDClient frostfsidcore.SubjectProvider
-
- next Server
-}
-
-func NewAPEServer(router policyengine.ChainRouter, reader containers, ir ir, nm netmap.Source, frostFSIDClient frostfsidcore.SubjectProvider, srv Server) Server {
- return &apeChecker{
- router: router,
- reader: reader,
- ir: ir,
- next: srv,
- nm: nm,
- frostFSIDClient: frostFSIDClient,
- }
-}
-
-func (ac *apeChecker) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Delete")
- defer span.End()
-
- if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(),
- nativeschema.MethodDeleteContainer); err != nil {
- return nil, err
- }
-
- return ac.next.Delete(ctx, req)
-}
-
-func (ac *apeChecker) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Get")
- defer span.End()
-
- if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(),
- nativeschema.MethodGetContainer); err != nil {
- return nil, err
- }
-
- return ac.next.Get(ctx, req)
-}
-
-func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List")
- defer span.End()
-
- role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
- if err != nil {
- return nil, err
- }
-
- reqProps := map[string]string{
- nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
- nativeschema.PropertyKeyActorRole: role,
- }
-
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
- if err != nil {
- return nil, err
- }
- if p, ok := peer.FromContext(ctx); ok {
- if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
- reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
- }
- }
-
- namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
- if err != nil {
- return nil, fmt.Errorf("could not get owner namespace: %w", err)
- }
- if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
- return nil, err
- }
-
- request := aperequest.NewRequest(
- nativeschema.MethodListContainers,
- aperequest.NewResource(
- resourceName(namespace, ""),
- make(map[string]string),
- ),
- reqProps,
- )
-
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
- if err != nil {
- return nil, fmt.Errorf("failed to get group ids: %w", err)
- }
-
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
- }
-
- rt := policyengine.NewRequestTargetWithNamespace(namespace)
- rt.User = &policyengine.Target{
- Type: policyengine.User,
- Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
- }
- rt.Groups = make([]policyengine.Target, len(groups))
- for i := range groups {
- rt.Groups[i] = policyengine.GroupTarget(groups[i])
- }
-
- s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
- if err != nil {
- return nil, err
- }
-
- if found && s == apechain.Allow {
- return ac.next.List(ctx, req)
- }
-
- return nil, apeErr(nativeschema.MethodListContainers, s)
-}
-
-func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream")
- defer span.End()
-
- role, pk, err := ac.getRoleWithoutContainerID(stream.Context(), req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
- if err != nil {
- return err
- }
-
- reqProps := map[string]string{
- nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
- nativeschema.PropertyKeyActorRole: role,
- }
-
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
- if err != nil {
- return err
- }
- if p, ok := peer.FromContext(ctx); ok {
- if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
- reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
- }
- }
-
- namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID())
- if err != nil {
- return fmt.Errorf("could not get owner namespace: %w", err)
- }
- if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil {
- return err
- }
-
- request := aperequest.NewRequest(
- nativeschema.MethodListContainers,
- aperequest.NewResource(
- resourceName(namespace, ""),
- make(map[string]string),
- ),
- reqProps,
- )
-
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
- if err != nil {
- return fmt.Errorf("failed to get group ids: %w", err)
- }
-
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
- }
-
- rt := policyengine.NewRequestTargetWithNamespace(namespace)
- rt.User = &policyengine.Target{
- Type: policyengine.User,
- Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
- }
- rt.Groups = make([]policyengine.Target, len(groups))
- for i := range groups {
- rt.Groups[i] = policyengine.GroupTarget(groups[i])
- }
-
- s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
- if err != nil {
- return err
- }
-
- if found && s == apechain.Allow {
- return ac.next.ListStream(req, stream)
- }
-
- return apeErr(nativeschema.MethodListContainers, s)
-}
-
-func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put")
- defer span.End()
-
- role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
- if err != nil {
- return nil, err
- }
-
- reqProps := map[string]string{
- nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
- nativeschema.PropertyKeyActorRole: role,
- }
-
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
- if err != nil {
- return nil, err
- }
- if p, ok := peer.FromContext(ctx); ok {
- if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
- reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
- }
- }
-
- namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID())
- if err != nil {
- return nil, fmt.Errorf("get namespace error: %w", err)
- }
- if err = validateNamespace(req.GetBody().GetContainer(), namespace); err != nil {
- return nil, err
- }
-
- request := aperequest.NewRequest(
- nativeschema.MethodPutContainer,
- aperequest.NewResource(
- resourceName(namespace, ""),
- make(map[string]string),
- ),
- reqProps,
- )
-
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
- if err != nil {
- return nil, fmt.Errorf("failed to get group ids: %w", err)
- }
-
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
- }
-
- rt := policyengine.NewRequestTargetWithNamespace(namespace)
- rt.User = &policyengine.Target{
- Type: policyengine.User,
- Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
- }
- rt.Groups = make([]policyengine.Target, len(groups))
- for i := range groups {
- rt.Groups[i] = policyengine.GroupTarget(groups[i])
- }
-
- s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
- if err != nil {
- return nil, err
- }
-
- if found && s == apechain.Allow {
- return ac.next.Put(ctx, req)
- }
-
- return nil, apeErr(nativeschema.MethodPutContainer, s)
-}
-
-func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
- if vh == nil {
- return "", nil, errMissingVerificationHeader
- }
-
- if oID == nil {
- return "", nil, errMissingOwnerID
- }
- var ownerID user.ID
- if err := ownerID.ReadFromV2(*oID); err != nil {
- return "", nil, err
- }
-
- actor, pk, err := ac.getActorAndPublicKey(mh, vh, undefinedContainerID)
- if err != nil {
- return "", nil, err
- }
-
- if actor.Equals(ownerID) {
- return nativeschema.PropertyValueContainerRoleOwner, pk, nil
- }
-
- pkBytes := pk.Bytes()
- isIR, err := ac.isInnerRingKey(ctx, pkBytes)
- if err != nil {
- return "", nil, err
- }
- if isIR {
- return nativeschema.PropertyValueContainerRoleIR, pk, nil
- }
-
- return nativeschema.PropertyValueContainerRoleOthers, pk, nil
-}
-
-func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, containerID *refs.ContainerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, op string) error {
- if vh == nil {
- return errMissingVerificationHeader
- }
-
- id, err := getContainerID(containerID)
- if err != nil {
- return err
- }
-
- cont, err := ac.reader.Get(ctx, id)
- if err != nil {
- return err
- }
-
- reqProps, pk, err := ac.getRequestProps(ctx, mh, vh, cont, id)
- if err != nil {
- return err
- }
-
- namespace := ""
- cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cont.Value).Zone(), ".ns")
- if hasNamespace {
- namespace = cntNamespace
- }
-
- groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk)
- if err != nil {
- return fmt.Errorf("failed to get group ids: %w", err)
- }
-
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
- }
-
- request := aperequest.NewRequest(
- op,
- aperequest.NewResource(
- resourceName(namespace, id.EncodeToString()),
- ac.getContainerProps(cont),
- ),
- reqProps,
- )
-
- s, found, err := ac.router.IsAllowed(apechain.Ingress,
- policyengine.NewRequestTargetExtended(namespace, id.EncodeToString(), fmt.Sprintf("%s:%s", namespace, pk.Address()), groups),
- request)
- if err != nil {
- return err
- }
-
- if found && s == apechain.Allow {
- return nil
- }
-
- return apeErr(op, s)
-}
-
-func apeErr(operation string, status apechain.Status) error {
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(fmt.Sprintf("access to container operation %s is denied by access policy engine: %s", operation, status.String()))
- return errAccessDenied
-}
-
-func getContainerID(reqContID *refs.ContainerID) (cid.ID, error) {
- if reqContID == nil {
- return cid.ID{}, errMissingContainerID
- }
- var id cid.ID
- err := id.ReadFromV2(*reqContID)
- if err != nil {
- return cid.ID{}, fmt.Errorf("invalid container ID: %w", err)
- }
- return id, nil
-}
-
-func resourceName(namespace string, container string) string {
- if namespace == "" && container == "" {
- return nativeschema.ResourceFormatRootContainers
- }
- if namespace == "" && container != "" {
- return fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container)
- }
- if namespace != "" && container == "" {
- return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, namespace)
- }
- return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container)
-}
-
-func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]string {
- return map[string]string{
- nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(),
- }
-}
-
-func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader,
- cont *containercore.Container, cnrID cid.ID,
-) (map[string]string, *keys.PublicKey, error) {
- actor, pk, err := ac.getActorAndPublicKey(mh, vh, cnrID)
- if err != nil {
- return nil, nil, err
- }
- role, err := ac.getRole(ctx, actor, pk, cont, cnrID)
- if err != nil {
- return nil, nil, err
- }
- reqProps := map[string]string{
- nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
- nativeschema.PropertyKeyActorRole: role,
- }
- reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk)
- if err != nil {
- return nil, nil, err
- }
- if p, ok := peer.FromContext(ctx); ok {
- if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
- reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
- }
- }
- return reqProps, pk, nil
-}
-
-func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
- if cont.Value.Owner().Equals(*actor) {
- return nativeschema.PropertyValueContainerRoleOwner, nil
- }
-
- pkBytes := pk.Bytes()
- isIR, err := ac.isInnerRingKey(ctx, pkBytes)
- if err != nil {
- return "", err
- }
- if isIR {
- return nativeschema.PropertyValueContainerRoleIR, nil
- }
-
- isContainer, err := ac.isContainerKey(ctx, pkBytes, cnrID, cont)
- if err != nil {
- return "", err
- }
- if isContainer {
- return nativeschema.PropertyValueContainerRoleContainer, nil
- }
-
- return nativeschema.PropertyValueContainerRoleOthers, nil
-}
-
-func (ac *apeChecker) getActorAndPublicKey(mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, cnrID cid.ID) (*user.ID, *keys.PublicKey, error) {
- st, err := ac.getSessionToken(mh)
- if err != nil {
- return nil, nil, err
- }
-
- if st != nil {
- return ac.getActorAndPKFromSessionToken(st, cnrID)
- }
- return ac.getActorAndPKFromSignature(vh)
-}
-
-func (ac *apeChecker) getActorAndPKFromSignature(vh *session.RequestVerificationHeader) (*user.ID, *keys.PublicKey, error) {
- for vh.GetOrigin() != nil {
- vh = vh.GetOrigin()
- }
- sig := vh.GetBodySignature()
- if sig == nil {
- return nil, nil, errEmptyBodySignature
- }
- key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
- if err != nil {
- return nil, nil, fmt.Errorf("invalid signature key: %w", err)
- }
-
- var userID user.ID
- user.IDFromKey(&userID, (ecdsa.PublicKey)(*key))
-
- return &userID, key, nil
-}
-
-func (ac *apeChecker) getSessionToken(mh *session.RequestMetaHeader) (*sessionSDK.Container, error) {
- for mh.GetOrigin() != nil {
- mh = mh.GetOrigin()
- }
- st := mh.GetSessionToken()
- if st == nil {
- return nil, nil
- }
-
- var tok sessionSDK.Container
- err := tok.ReadFromV2(*st)
- if err != nil {
- return nil, fmt.Errorf("invalid session token: %w", err)
- }
-
- return &tok, nil
-}
-
-func (ac *apeChecker) getActorAndPKFromSessionToken(st *sessionSDK.Container, cnrID cid.ID) (*user.ID, *keys.PublicKey, error) {
- if cnrID != undefinedContainerID && !st.AppliedTo(cnrID) {
- return nil, nil, errSessionContainerMissmatch
- }
- if !st.VerifySignature() {
- return nil, nil, errInvalidSessionTokenSignature
- }
- var tok session.Token
- st.WriteToV2(&tok)
-
- signaturePublicKey, err := keys.NewPublicKeyFromBytes(tok.GetSignature().GetKey(), elliptic.P256())
- if err != nil {
- return nil, nil, fmt.Errorf("invalid key in session token signature: %w", err)
- }
-
- tokenIssuer := st.Issuer()
- if !isOwnerFromKey(tokenIssuer, signaturePublicKey) {
- return nil, nil, errInvalidSessionTokenOwner
- }
-
- return &tokenIssuer, signaturePublicKey, nil
-}
-
-func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
- if key == nil {
- return false
- }
-
- var id2 user.ID
- user.IDFromKey(&id2, (ecdsa.PublicKey)(*key))
-
- return id2.Equals(id)
-}
-
-func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, error) {
- innerRingKeys, err := ac.ir.InnerRingKeys(ctx)
- if err != nil {
- return false, err
- }
-
- for i := range innerRingKeys {
- if bytes.Equal(innerRingKeys[i], pk) {
- return true, nil
- }
- }
-
- return false, nil
-}
-
-func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
- binCnrID := make([]byte, sha256.Size)
- cnrID.Encode(binCnrID)
-
- nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm)
- if err != nil {
- return false, err
- }
-
- if isContainerNode(nm, pk, binCnrID, cont) {
- return true, nil
- }
-
- // then check previous netmap, this can happen in-between epoch change
- // when node migrates data from last epoch container
- nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm)
- if err != nil {
- return false, err
- }
-
- return isContainerNode(nm, pk, binCnrID, cont), nil
-}
-
-func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
- // It could an error only if the network map doesn't have enough nodes to
- // fulfil the policy. It's a logical error that doesn't affect an actor role
- // determining, so we ignore it
- cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
-
- for i := range cnrVectors {
- for j := range cnrVectors[i] {
- if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
- return true
- }
- }
- }
-
- return false
-}
-
-func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
- var ownerSDK user.ID
- if owner == nil {
- return "", errOwnerIDIsNotSet
- }
- if err := ownerSDK.ReadFromV2(*owner); err != nil {
- return "", err
- }
- addr := ownerSDK.ScriptHash()
-
- namespace := ""
- subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
- if err == nil {
- namespace = subject.Namespace
- } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
- return "", fmt.Errorf("get subject error: %w", err)
- }
- return namespace, nil
-}
-
-func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) {
- var ownerSDK user.ID
- if owner == nil {
- return "", errOwnerIDIsNotSet
- }
- if err := ownerSDK.ReadFromV2(*owner); err != nil {
- return "", err
- }
- addr := ownerSDK.ScriptHash()
- subject, err := ac.frostFSIDClient.GetSubject(ctx, addr)
- if err != nil {
- return "", fmt.Errorf("get subject error: %w", err)
- }
- return subject.Namespace, nil
-}
-
-// validateNamespace validates a namespace set in a container.
-// If frostfs-id contract stores a namespace N1 for an owner ID and a container within a request
-// is set with namespace N2 (via Zone() property), then N2 is invalid and the request is denied.
-func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) error {
- if cnrV2 == nil {
- return nil
- }
- var cnr cnrSDK.Container
- if err := cnr.ReadFromV2(*cnrV2); err != nil {
- return err
- }
- cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr).Zone(), ".ns")
- if hasNamespace {
- if cntNamespace != ownerIDNamespace {
- if ownerIDNamespace == "" {
- return errInvalidDomainZone
- }
- return fmt.Errorf("invalid domain zone: expected namespace %s, but got %s", ownerIDNamespace, cntNamespace)
- }
- } else if ownerIDNamespace != "" {
- return fmt.Errorf("invalid domain zone: expected namespace %s, but got invalid or empty", ownerIDNamespace)
- }
- return nil
-}
-
-// validateNamespace validates if a namespace of a request actor equals to owner's namespace.
-// An actor's namespace is calculated by a public key.
-func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error {
- var actor user.ID
- user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
- actorOwnerID := new(refs.OwnerID)
- actor.WriteToV2(actorOwnerID)
- actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID)
- if err != nil {
- return fmt.Errorf("could not get actor namespace: %w", err)
- }
- if actorNamespace != ownerIDNamespace {
- return fmt.Errorf("actor namespace %s differs from owner: %s", actorNamespace, ownerIDNamespace)
- }
- return nil
-}
-
-// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
-func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
- if reqProps == nil {
- reqProps = make(map[string]string)
- }
- props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk)
- if err != nil {
- return reqProps, err
- }
- for propertyName, properyValue := range props {
- reqProps[propertyName] = properyValue
- }
- return reqProps, nil
-}
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
deleted file mode 100644
index 77a981d1a..000000000
--- a/pkg/services/container/ape_test.go
+++ /dev/null
@@ -1,1586 +0,0 @@
-package container
-
-import (
- "context"
- "crypto/ecdsa"
- "encoding/hex"
- "errors"
- "fmt"
- "net"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
- containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- containertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
- commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/stretchr/testify/require"
- "google.golang.org/grpc/peer"
-)
-
-const (
- testDomainName = "testdomainname"
- testDomainZone = "testdomainname.ns"
-)
-
-func TestAPE(t *testing.T) {
- t.Parallel()
- t.Run("allow then deny get container", testAllowThenDenyGetContainerRuleDefined)
- t.Run("allow by group id", TestAllowByGroupIDs)
- t.Run("deny get container no rule found", testDenyGetContainerNoRuleFound)
- t.Run("deny get container for others", testDenyGetContainerForOthers)
- t.Run("deny get container by user claim tag", testDenyGetContainerByUserClaimTag)
- t.Run("deny get container by IP", testDenyGetContainerByIP)
- t.Run("deny get container by group id", testDenyGetContainerByGroupID)
- t.Run("deny put container for others with session token", testDenyPutContainerForOthersSessionToken)
- t.Run("deny put container, read namespace from frostfsID", testDenyPutContainerReadNamespaceFromFrostfsID)
- t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace)
- t.Run("deny list containers for owner with PK", testDenyListContainersForPK)
- t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError)
-}
-
-const (
- incomingIP = "192.92.33.1"
-)
-
-func ctxWithPeerInfo() context.Context {
- return peer.NewContext(context.Background(), &peer.Peer{
- Addr: &net.TCPAddr{
- IP: net.ParseIP(incomingIP),
- Port: 41111,
- },
- })
-}
-
-func testAllowThenDenyGetContainerRuleDefined(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{},
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- addDefaultAllowGetPolicy(t, router, contID)
-
- req := &container.GetRequest{}
- req.SetBody(&container.GetRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- _, err = apeSrv.Get(context.Background(), req)
- require.NoError(t, err)
-
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- resp, err := apeSrv.Get(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
- require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
-}
-
-func TestAllowByGroupIDs(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- },
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 1,
- Name: "Group#1",
- },
- },
- },
- },
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- req := &container.GetRequest{}
- req.SetBody(&container.GetRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.GroupTarget(":1"), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.Allow,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: commonschema.PropertyKeyFrostFSIDGroupID,
- Value: "1",
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- resp, err := apeSrv.Get(context.Background(), req)
- require.NotNil(t, resp)
- require.NoError(t, err)
-}
-
-func testDenyGetContainerNoRuleFound(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{},
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- req := &container.GetRequest{}
- req.SetBody(&container.GetRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- resp, err := apeSrv.Get(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
- require.Contains(t, errAccessDenied.Reason(), chain.NoRuleFound.String())
-}
-
-func testDenyGetContainerForOthers(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{},
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := &container.GetRequest{}
- req.SetBody(&container.GetRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- resp, err := apeSrv.Get(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
-}
-
-func testDenyGetContainerByUserClaimTag(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- },
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 19888,
- },
- },
- },
- },
- }
-
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, "tag-attr1"),
- Value: "value100",
- Op: chain.CondStringNotEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := &container.GetRequest{}
- req.SetBody(&container.GetRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- resp, err := apeSrv.Get(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
-}
-
-func testDenyGetContainerByIP(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- },
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 19888,
- },
- },
- },
- },
- }
-
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: commonschema.PropertyKeyFrostFSSourceIP,
- Value: incomingIP + "/16",
- Op: chain.CondIPAddress,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := &container.GetRequest{}
- req.SetBody(&container.GetRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- resp, err := apeSrv.Get(ctxWithPeerInfo(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
- require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
-}
-
-func testDenyGetContainerByGroupID(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- },
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- pk.PublicKey().GetScriptHash(): {
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 19888,
- },
- },
- },
- },
- }
-
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: commonschema.PropertyKeyFrostFSIDGroupID,
- Value: "19888",
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := &container.GetRequest{}
- req.SetBody(&container.GetRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- resp, err := apeSrv.Get(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
-}
-
-func testDenyPutContainerForOthersSessionToken(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
-
- testContainer := containertest.Container()
- owner := testContainer.Owner()
- ownerAddr := owner.ScriptHash()
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- ownerAddr: {},
- },
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
-
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodPutContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- nativeschema.ResourceFormatRootContainers,
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := initPutRequest(t, testContainer)
-
- resp, err := apeSrv.Put(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
-}
-
-func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
-
- cnrID, testContainer := initTestContainer(t, true)
- contRdr.c[cnrID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
-
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodPutContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := initPutRequest(t, testContainer)
- ownerScriptHash := initOwnerIDScriptHash(testContainer)
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- ownerScriptHash: {
- Namespace: testDomainName,
- Name: testDomainName,
- },
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- ownerScriptHash: {
- Namespace: testDomainName,
- Name: testDomainName,
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 19888,
- },
- },
- },
- },
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
- resp, err := apeSrv.Put(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
-}
-
-func testDenyPutContainerInvalidNamespace(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
-
- cnrID, testContainer := initTestContainer(t, false)
- var domain cnrSDK.Domain
- domain.SetName("incorrect" + testDomainName)
- domain.SetZone("incorrect" + testDomainZone)
- cnrSDK.WriteDomain(&testContainer, domain)
- contRdr.c[cnrID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
-
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodPutContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := initPutRequest(t, testContainer)
- ownerScriptHash := initOwnerIDScriptHash(testContainer)
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- ownerScriptHash: {
- Namespace: testDomainName,
- Name: testDomainName,
- },
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- ownerScriptHash: {
- Namespace: testDomainName,
- Name: testDomainName,
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 19888,
- },
- },
- },
- },
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
- resp, err := apeSrv.Put(context.Background(), req)
- require.Nil(t, resp)
- require.ErrorContains(t, err, "invalid domain zone")
-}
-
-func testDenyListContainersForPK(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{},
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodListContainers,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- nativeschema.ResourceFormatRootContainers,
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorPublicKey,
- Value: hex.EncodeToString(pk.PublicKey().Bytes()),
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- var userID user.ID
- user.IDFromKey(&userID, pk.PrivateKey.PublicKey)
-
- req := &container.ListRequest{}
- req.SetBody(&container.ListRequestBody{})
- var ownerID refs.OwnerID
- userID.WriteToV2(&ownerID)
- req.GetBody().SetOwnerID(&ownerID)
-
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- resp, err := apeSrv.List(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
-}
-
-func testDenyListContainersValidationNamespaceError(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
-
- actorPK, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- ownerPK, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- actorScriptHash, ownerScriptHash := initActorOwnerScriptHashes(t, actorPK, ownerPK)
-
- const actorDomain = "actor" + testDomainName
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{
- actorScriptHash: {
- Namespace: actorDomain,
- Name: actorDomain,
- },
- ownerScriptHash: {
- Namespace: testDomainName,
- Name: testDomainName,
- },
- },
- subjectsExt: map[util.Uint160]*client.SubjectExtended{
- actorScriptHash: {
- Namespace: actorDomain,
- Name: actorDomain,
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 19777,
- },
- },
- },
- ownerScriptHash: {
- Namespace: testDomainName,
- Name: testDomainName,
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 19888,
- },
- },
- },
- },
- }
-
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
-
- _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodListContainers,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- nativeschema.ResourceFormatRootContainers,
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorPublicKey,
- Value: actorPK.PublicKey().String(),
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := initListRequest(t, actorPK, ownerPK)
-
- resp, err := apeSrv.List(context.Background(), req)
- require.Nil(t, resp)
- require.ErrorContains(t, err, "actor namespace "+actorDomain+" differs")
-}
-
-type srvStub struct {
- calls map[string]int
-}
-
-func (s *srvStub) Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error) {
- s.calls["Delete"]++
- return &container.DeleteResponse{}, nil
-}
-
-func (s *srvStub) Get(context.Context, *container.GetRequest) (*container.GetResponse, error) {
- s.calls["Get"]++
- return &container.GetResponse{}, nil
-}
-
-func (s *srvStub) List(context.Context, *container.ListRequest) (*container.ListResponse, error) {
- s.calls["List"]++
- return &container.ListResponse{}, nil
-}
-
-func (s *srvStub) ListStream(*container.ListStreamRequest, ListStream) error {
- s.calls["ListStream"]++
- return nil
-}
-
-func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutResponse, error) {
- s.calls["Put"]++
- return &container.PutResponse{}, nil
-}
-
-type irStub struct {
- keys [][]byte
-}
-
-func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) {
- return s.keys, nil
-}
-
-type containerStub struct {
- c map[cid.ID]*containercore.Container
-}
-
-func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) {
- if v, ok := s.c[id]; ok {
- return v, nil
- }
- return nil, errors.New("container not found")
-}
-
-type netmapStub struct {
- netmaps map[uint64]*netmap.NetMap
- currentEpoch uint64
-}
-
-func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
- if diff >= s.currentEpoch {
- return nil, errors.New("invalid diff")
- }
- return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
-}
-
-func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
- if nm, found := s.netmaps[epoch]; found {
- return nm, nil
- }
- return nil, errors.New("netmap not found")
-}
-
-func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
- return s.currentEpoch, nil
-}
-
-type frostfsidStub struct {
- subjects map[util.Uint160]*client.Subject
- subjectsExt map[util.Uint160]*client.SubjectExtended
-}
-
-func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*client.Subject, error) {
- s, ok := f.subjects[owner]
- if !ok {
- return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
- }
- return s, nil
-}
-
-func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) {
- s, ok := f.subjectsExt[owner]
- if !ok {
- return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
- }
- return s, nil
-}
-
-type testAPEServer struct {
- engine engine.Engine
-
- containerReader *containerStub
-
- ir *irStub
-
- netmap *netmapStub
-
- frostfsIDSubjectReader *frostfsidStub
-
- apeChecker *apeChecker
-}
-
-func newTestAPEServer() testAPEServer {
- srv := &srvStub{
- calls: map[string]int{},
- }
-
- engine := inmemory.NewInMemory()
-
- containerReader := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
-
- ir := &irStub{
- keys: [][]byte{},
- }
-
- netmap := &netmapStub{}
-
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{},
- subjectsExt: map[util.Uint160]*client.SubjectExtended{},
- }
-
- apeChecker := &apeChecker{
- router: engine,
- reader: containerReader,
- ir: ir,
- nm: netmap,
- frostFSIDClient: frostfsIDSubjectReader,
- next: srv,
- }
-
- return testAPEServer{
- engine: engine,
- containerReader: containerReader,
- ir: ir,
- netmap: netmap,
- frostfsIDSubjectReader: frostfsIDSubjectReader,
- apeChecker: apeChecker,
- }
-}
-
-func TestValidateContainerBoundedOperation(t *testing.T) {
- t.Parallel()
-
- t.Run("check root-defined container in root-defined container target rule", func(t *testing.T) {
- t.Parallel()
-
- components := newTestAPEServer()
- contID, testContainer := initTestContainer(t, false)
- components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
- initTestNetmap(components.netmap)
-
- _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := initTestGetContainerRequest(t, contID)
-
- err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
- aErr := apeErr(nativeschema.MethodGetContainer, chain.AccessDenied)
- require.ErrorContains(t, err, aErr.Error())
- })
-
- t.Run("check root-defined container in testdomain-defined container target rule", func(t *testing.T) {
- t.Parallel()
-
- components := newTestAPEServer()
- contID, testContainer := initTestContainer(t, false)
- components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
- initTestNetmap(components.netmap)
-
- _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, testDomainName, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- addDefaultAllowGetPolicy(t, components.engine, contID)
-
- req := initTestGetContainerRequest(t, contID)
-
- err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
- require.NoError(t, err)
- })
-
- t.Run("check root-defined container in testdomain namespace target rule", func(t *testing.T) {
- t.Parallel()
-
- components := newTestAPEServer()
- contID, testContainer := initTestContainer(t, false)
- components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
- initTestNetmap(components.netmap)
-
- _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- addDefaultAllowGetPolicy(t, components.engine, contID)
-
- req := initTestGetContainerRequest(t, contID)
-
- err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
- require.NoError(t, err)
- })
-
- t.Run("check testdomain-defined container in root-defined container target rule", func(t *testing.T) {
- t.Parallel()
-
- components := newTestAPEServer()
- contID, testContainer := initTestContainer(t, true)
- components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
- initTestNetmap(components.netmap)
-
- _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- addDefaultAllowGetPolicy(t, components.engine, contID)
-
- req := initTestGetContainerRequest(t, contID)
-
- err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
- require.NoError(t, err)
- })
-
- t.Run("check testdomain-defined container in testdomain-defined container target rule", func(t *testing.T) {
- t.Parallel()
-
- components := newTestAPEServer()
- contID, testContainer := initTestContainer(t, true)
- components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
- initTestNetmap(components.netmap)
-
- _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, testDomainName, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- addDefaultAllowGetPolicy(t, components.engine, contID)
-
- req := initTestGetContainerRequest(t, contID)
-
- err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
- aErr := apeErr(nativeschema.MethodGetContainer, chain.AccessDenied)
- require.ErrorContains(t, err, aErr.Error())
- })
-
- t.Run("check testdomain-defined container in testdomain namespace target rule", func(t *testing.T) {
- t.Parallel()
-
- components := newTestAPEServer()
- contID, testContainer := initTestContainer(t, true)
- components.containerReader.c[contID] = &containercore.Container{Value: testContainer}
- initTestNetmap(components.netmap)
-
- _, _, err := components.engine.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(testDomainName), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, testDomainName),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := initTestGetContainerRequest(t, contID)
-
- err = components.apeChecker.validateContainerBoundedOperation(ctxWithPeerInfo(), req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(), nativeschema.MethodGetContainer)
- aErr := apeErr(nativeschema.MethodGetContainer, chain.AccessDenied)
- require.ErrorContains(t, err, aErr.Error())
- })
-}
-
-func initTestGetContainerRequest(t *testing.T, contID cid.ID) *container.GetRequest {
- req := &container.GetRequest{}
- req.SetBody(&container.GetRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
- return req
-}
-
-func initTestNetmap(netmapStub *netmapStub) {
- netmapStub.currentEpoch = 100
- netmapStub.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(netmapStub.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- netmapStub.netmaps[netmapStub.currentEpoch] = &testNetmap
- netmapStub.netmaps[netmapStub.currentEpoch-1] = &testNetmap
-}
-
-func initTestContainer(t *testing.T, isDomainSet bool) (cid.ID, cnrSDK.Container) {
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- if isDomainSet {
- // no domain defined -> container is defined in root namespace
- var domain cnrSDK.Domain
- domain.SetName(testDomainName)
- domain.SetZone(testDomainZone)
- cnrSDK.WriteDomain(&testContainer, domain)
- }
- return contID, testContainer
-}
-
-func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.PutRequest {
- req := &container.PutRequest{}
- req.SetBody(&container.PutRequestBody{})
- var reqCont container.Container
- testContainer.WriteToV2(&reqCont)
- req.GetBody().SetContainer(&reqCont)
-
- sessionPK, err := keys.NewPrivateKey()
- require.NoError(t, err)
- sToken := sessiontest.ContainerSigned()
- sToken.ApplyOnlyTo(cid.ID{})
- require.NoError(t, sToken.Sign(sessionPK.PrivateKey))
- var sTokenV2 session.Token
- sToken.WriteToV2(&sTokenV2)
- metaHeader := new(session.RequestMetaHeader)
- metaHeader.SetSessionToken(&sTokenV2)
- req.SetMetaHeader(metaHeader)
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- return req
-}
-
-func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 {
- var ownerSDK *user.ID
- owner := testContainer.Owner()
- ownerSDK = &owner
- return ownerSDK.ScriptHash()
-}
-
-func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) {
- var actorUserID user.ID
- user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey()))
- actorScriptHash = actorUserID.ScriptHash()
-
- var ownerUserID user.ID
- user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey()))
- ownerScriptHash = ownerUserID.ScriptHash()
- require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String())
- return
-}
-
-func initListRequest(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) *container.ListRequest {
- var ownerUserID user.ID
- user.IDFromKey(&ownerUserID, ownerPK.PrivateKey.PublicKey)
-
- req := &container.ListRequest{}
- req.SetBody(&container.ListRequestBody{})
- var ownerID refs.OwnerID
- ownerUserID.WriteToV2(&ownerID)
- req.GetBody().SetOwnerID(&ownerID)
-
- require.NoError(t, signature.SignServiceMessage(&actorPK.PrivateKey, req))
- return req
-}
-
-func addDefaultAllowGetPolicy(t *testing.T, e engine.Engine, contID cid.ID) {
- _, _, err := e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.Allow,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainer,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- nativeschema.ResourceFormatAllContainers,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-}
diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go
deleted file mode 100644
index b235efa3c..000000000
--- a/pkg/services/container/audit.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package container
-
-import (
- "context"
- "sync/atomic"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- container_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-var _ Server = (*auditService)(nil)
-
-type auditService struct {
- next Server
- log *logger.Logger
- enabled *atomic.Bool
-}
-
-func NewAuditService(next Server, log *logger.Logger, enabled *atomic.Bool) Server {
- return &auditService{
- next: next,
- log: log,
- enabled: enabled,
- }
-}
-
-// Delete implements Server.
-func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
- res, err := a.next.Delete(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
-
- return res, err
-}
-
-// Get implements Server.
-func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
- res, err := a.next.Get(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Get_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
- return res, err
-}
-
-// List implements Server.
-func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
- res, err := a.next.List(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_List_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
- return res, err
-}
-
-// ListStream implements Server.
-func (a *auditService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- err := a.next.ListStream(req, stream)
- if !a.enabled.Load() {
- return err
- }
- audit.LogRequest(stream.Context(), a.log, container_grpc.ContainerService_ListStream_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
- return err
-}
-
-// Put implements Server.
-func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
- res, err := a.next.Put(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
- audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Put_FullMethodName, req,
- audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil)
- return res, err
-}
diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go
index cdd0d2514..d4ae11d62 100644
--- a/pkg/services/container/executor.go
+++ b/pkg/services/container/executor.go
@@ -4,9 +4,9 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
type ServiceExecutor interface {
@@ -14,7 +14,8 @@ type ServiceExecutor interface {
Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error)
Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error)
List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error)
- ListStream(context.Context, *container.ListStreamRequest, ListStream) error
+ SetExtendedACL(context.Context, *session.Token, *container.SetExtendedACLRequestBody) (*container.SetExtendedACLResponseBody, error)
+ GetExtendedACL(context.Context, *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error)
}
type executorSvc struct {
@@ -95,10 +96,33 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co
return resp, nil
}
-func (s *executorSvc) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- err := s.exec.ListStream(stream.Context(), req, stream)
- if err != nil {
- return fmt.Errorf("could not execute ListStream request: %w", err)
+func (s *executorSvc) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) {
+ meta := req.GetMetaHeader()
+ for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
+ meta = origin
}
- return nil
+
+ respBody, err := s.exec.SetExtendedACL(ctx, meta.GetSessionToken(), req.GetBody())
+ if err != nil {
+ return nil, fmt.Errorf("could not execute SetEACL request: %w", err)
+ }
+
+ resp := new(container.SetExtendedACLResponse)
+ resp.SetBody(respBody)
+
+ s.respSvc.SetMeta(resp)
+ return resp, nil
+}
+
+func (s *executorSvc) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
+ respBody, err := s.exec.GetExtendedACL(ctx, req.GetBody())
+ if err != nil {
+ return nil, fmt.Errorf("could not execute GetEACL request: %w", err)
+ }
+
+ resp := new(container.GetExtendedACLResponse)
+ resp.SetBody(respBody)
+
+ s.respSvc.SetMeta(resp)
+ return resp, nil
}
diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go
index eaa608eba..dec022219 100644
--- a/pkg/services/container/morph/executor.go
+++ b/pkg/services/container/morph/executor.go
@@ -5,18 +5,17 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
-var errMissingUserID = errors.New("missing user ID")
-
type morphExecutor struct {
rdr Reader
wrt Writer
@@ -25,20 +24,22 @@ type morphExecutor struct {
// Reader is an interface of read-only container storage.
type Reader interface {
containercore.Source
+ containercore.EACLSource
- // ContainersOf returns a list of container identifiers belonging
+ // List returns a list of container identifiers belonging
// to the specified user of FrostFS system. Returns the identifiers
// of all FrostFS containers if pointer to owner identifier is nil.
- ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
- IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
+ List(*user.ID) ([]cid.ID, error)
}
// Writer is an interface of container storage updater.
type Writer interface {
// Put stores specified container in the side chain.
- Put(context.Context, containercore.Container) (*cid.ID, error)
+ Put(containercore.Container) (*cid.ID, error)
// Delete removes specified container from the side chain.
- Delete(context.Context, containercore.RemovalWitness) error
+ Delete(containercore.RemovalWitness) error
+ // PutEACL updates extended ACL table of specified container in the side chain.
+ PutEACL(containercore.EACL) error
}
func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
@@ -48,7 +49,7 @@ func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
}
}
-func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
+func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
sigV2 := body.GetSignature()
if sigV2 == nil {
// TODO(@cthulhu-rider): #468 use "const" error
@@ -81,7 +82,7 @@ func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *c
}
}
- idCnr, err := s.wrt.Put(ctx, cnr)
+ idCnr, err := s.wrt.Put(cnr)
if err != nil {
return nil, err
}
@@ -95,7 +96,7 @@ func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *c
return res, nil
}
-func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
+func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -125,7 +126,7 @@ func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body
rmWitness.Signature = body.GetSignature()
rmWitness.SessionToken = tok
- err = s.wrt.Delete(ctx, rmWitness)
+ err = s.wrt.Delete(rmWitness)
if err != nil {
return nil, err
}
@@ -133,7 +134,7 @@ func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body
return new(container.DeleteResponseBody), nil
}
-func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) {
+func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
return nil, errors.New("missing container ID")
@@ -146,7 +147,7 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody)
return nil, fmt.Errorf("invalid container ID: %w", err)
}
- cnr, err := s.rdr.Get(ctx, id)
+ cnr, err := s.rdr.Get(id)
if err != nil {
return nil, err
}
@@ -173,10 +174,10 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody)
return res, nil
}
-func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
+func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) {
idV2 := body.GetOwnerID()
if idV2 == nil {
- return nil, errMissingUserID
+ return nil, fmt.Errorf("missing user ID")
}
var id user.ID
@@ -186,7 +187,7 @@ func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBod
return nil, fmt.Errorf("invalid user ID: %w", err)
}
- cnrs, err := s.rdr.ContainersOf(ctx, &id)
+ cnrs, err := s.rdr.List(&id)
if err != nil {
return nil, err
}
@@ -202,55 +203,72 @@ func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBod
return res, nil
}
-func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error {
- body := req.GetBody()
- idV2 := body.GetOwnerID()
- if idV2 == nil {
- return errMissingUserID
+func (s *morphExecutor) SetExtendedACL(_ context.Context, tokV2 *sessionV2.Token, body *container.SetExtendedACLRequestBody) (*container.SetExtendedACLResponseBody, error) {
+ sigV2 := body.GetSignature()
+ if sigV2 == nil {
+ // TODO(@cthulhu-rider): #468 use "const" error
+ return nil, errors.New("missing signature")
}
- var id user.ID
+ eaclInfo := containercore.EACL{
+ Value: eaclSDK.NewTableFromV2(body.GetEACL()),
+ }
+
+ err := eaclInfo.Signature.ReadFromV2(*sigV2)
+ if err != nil {
+ return nil, fmt.Errorf("can't read signature: %w", err)
+ }
+
+ if tokV2 != nil {
+ eaclInfo.Session = new(session.Container)
+
+ err := eaclInfo.Session.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+ }
+
+ err = s.wrt.PutEACL(eaclInfo)
+ if err != nil {
+ return nil, err
+ }
+
+ return new(container.SetExtendedACLResponseBody), nil
+}
+
+func (s *morphExecutor) GetExtendedACL(_ context.Context, body *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error) {
+ idV2 := body.GetContainerID()
+ if idV2 == nil {
+ return nil, errors.New("missing container ID")
+ }
+
+ var id cid.ID
err := id.ReadFromV2(*idV2)
if err != nil {
- return fmt.Errorf("invalid user ID: %w", err)
+ return nil, fmt.Errorf("invalid container ID: %w", err)
}
- resBody := new(container.ListStreamResponseBody)
- r := new(container.ListStreamResponse)
- r.SetBody(resBody)
-
- var cidList []refs.ContainerID
-
- // Amount of containers to send at once.
- const batchSize = 1000
-
- processCID := func(id cid.ID) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- var refID refs.ContainerID
- id.WriteToV2(&refID)
- cidList = append(cidList, refID)
- if len(cidList) == batchSize {
- r.GetBody().SetContainerIDs(cidList)
- cidList = cidList[:0]
- return stream.Send(r)
- }
- return nil
+ eaclInfo, err := s.rdr.GetEACL(id)
+ if err != nil {
+ return nil, err
}
- if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil {
- return err
+ var sigV2 refs.Signature
+ eaclInfo.Signature.WriteToV2(&sigV2)
+
+ var tokV2 *sessionV2.Token
+
+ if eaclInfo.Session != nil {
+ tokV2 = new(sessionV2.Token)
+
+ eaclInfo.Session.WriteToV2(tokV2)
}
- if len(cidList) > 0 {
- r.GetBody().SetContainerIDs(cidList)
- return stream.Send(r)
- }
+ res := new(container.GetExtendedACLResponseBody)
+ res.SetEACL(eaclInfo.Value.ToV2())
+ res.SetSignature(&sigV2)
+ res.SetSessionToken(tokV2)
- return nil
+ return res, nil
}
diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go
index 1f6fdb0be..a270ee856 100644
--- a/pkg/services/container/morph/executor_test.go
+++ b/pkg/services/container/morph/executor_test.go
@@ -4,12 +4,12 @@ import (
"context"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container"
containerSvcMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
containertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/test"
@@ -24,11 +24,15 @@ type mock struct {
containerSvcMorph.Reader
}
-func (m mock) Put(_ context.Context, _ containerCore.Container) (*cid.ID, error) {
+func (m mock) Put(_ containerCore.Container) (*cid.ID, error) {
return new(cid.ID), nil
}
-func (m mock) Delete(_ context.Context, _ containerCore.RemovalWitness) error {
+func (m mock) Delete(_ containerCore.RemovalWitness) error {
+ return nil
+}
+
+func (m mock) PutEACL(_ containerCore.EACL) error {
return nil
}
@@ -47,8 +51,7 @@ func TestInvalidToken(t *testing.T) {
sign := func(reqBody interface {
StableMarshal([]byte) []byte
SetSignature(signature *refs.Signature)
- },
- ) {
+ }) {
signer := frostfsecdsa.Signer(priv.PrivateKey)
var sig frostfscrypto.Signature
require.NoError(t, sig.Calculate(signer, reqBody.StableMarshal(nil)))
@@ -92,6 +95,17 @@ func TestInvalidToken(t *testing.T) {
return
},
},
+ {
+ name: "setEACL",
+ op: func(e containerSvc.ServiceExecutor, tokV2 *session.Token) (err error) {
+ var reqBody container.SetExtendedACLRequestBody
+ reqBody.SetSignature(new(refs.Signature))
+ sign(&reqBody)
+
+ _, err = e.SetExtendedACL(context.TODO(), tokV2, &reqBody)
+ return
+ },
+ },
}
for _, test := range tests {
diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go
index d9208077d..052a8c945 100644
--- a/pkg/services/container/server.go
+++ b/pkg/services/container/server.go
@@ -3,8 +3,7 @@ package container
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
)
// Server is an interface of the FrostFS API Container service server.
@@ -13,11 +12,7 @@ type Server interface {
Get(context.Context, *container.GetRequest) (*container.GetResponse, error)
Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error)
List(context.Context, *container.ListRequest) (*container.ListResponse, error)
- ListStream(*container.ListStreamRequest, ListStream) error
-}
-
-// ListStream is an interface of FrostFS API v2 compatible search streamer.
-type ListStream interface {
- util.ServerStream
- Send(*container.ListStreamResponse) error
+ SetExtendedACL(context.Context, *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error)
+ GetExtendedACL(context.Context, *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error)
+ AnnounceUsedSpace(context.Context, *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error)
}
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go
index 85fe7ae87..bba717f60 100644
--- a/pkg/services/container/sign.go
+++ b/pkg/services/container/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
)
type signService struct {
@@ -57,39 +57,29 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *signService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+func (s *signService) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) {
if err := s.sigSvc.VerifyRequest(req); err != nil {
- resp := new(container.ListStreamResponse)
- _ = s.sigSvc.SignResponse(resp, err)
- return stream.Send(resp)
+ resp := new(container.SetExtendedACLResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-
- ss := &listStreamSigner{
- ListStream: stream,
- sigSvc: s.sigSvc,
- }
- err := s.svc.ListStream(req, ss)
- if err != nil || !ss.nonEmptyResp {
- return ss.send(new(container.ListStreamResponse), err)
- }
- return nil
+ resp, err := util.EnsureNonNilResponse(s.svc.SetExtendedACL(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
-type listStreamSigner struct {
- ListStream
- sigSvc *util.SignService
-
- nonEmptyResp bool // set on first Send call
-}
-
-func (s *listStreamSigner) Send(resp *container.ListStreamResponse) error {
- s.nonEmptyResp = true
- return s.send(resp, nil)
-}
-
-func (s *listStreamSigner) send(resp *container.ListStreamResponse, err error) error {
- if err := s.sigSvc.SignResponse(resp, err); err != nil {
- return err
+func (s *signService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(container.GetExtendedACLResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
}
- return s.ListStream.Send(resp)
+ resp, err := util.EnsureNonNilResponse(s.svc.GetExtendedACL(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
+}
+
+func (s *signService) AnnounceUsedSpace(ctx context.Context, req *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) {
+ if err := s.sigSvc.VerifyRequest(req); err != nil {
+ resp := new(container.AnnounceUsedSpaceResponse)
+ return resp, s.sigSvc.SignResponse(resp, err)
+ }
+ resp, err := util.EnsureNonNilResponse(s.svc.AnnounceUsedSpace(ctx, req))
+ return resp, s.sigSvc.SignResponse(resp, err)
}
diff --git a/pkg/services/container/transport_splitter.go b/pkg/services/container/transport_splitter.go
deleted file mode 100644
index 4f8708da7..000000000
--- a/pkg/services/container/transport_splitter.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package container
-
-import (
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
-)
-
-type (
- TransportSplitter struct {
- next Server
-
- respSvc *response.Service
- cnrAmount uint32
- }
-
- listStreamMsgSizeCtrl struct {
- util.ServerStream
- stream ListStream
- respSvc *response.Service
- cnrAmount uint32
- }
-)
-
-func NewSplitterService(cnrAmount uint32, respSvc *response.Service, next Server) Server {
- return &TransportSplitter{
- next: next,
- respSvc: respSvc,
- cnrAmount: cnrAmount,
- }
-}
-
-func (s *TransportSplitter) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
- return s.next.Put(ctx, req)
-}
-
-func (s *TransportSplitter) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
- return s.next.Delete(ctx, req)
-}
-
-func (s *TransportSplitter) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
- return s.next.Get(ctx, req)
-}
-
-func (s *TransportSplitter) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
- return s.next.List(ctx, req)
-}
-
-func (s *TransportSplitter) ListStream(req *container.ListStreamRequest, stream ListStream) error {
- return s.next.ListStream(req, &listStreamMsgSizeCtrl{
- ServerStream: stream,
- stream: stream,
- respSvc: s.respSvc,
- cnrAmount: s.cnrAmount,
- })
-}
-
-func (s *listStreamMsgSizeCtrl) Send(resp *container.ListStreamResponse) error {
- s.respSvc.SetMeta(resp)
- body := resp.GetBody()
- ids := body.GetContainerIDs()
-
- var newResp *container.ListStreamResponse
-
- for {
- if newResp == nil {
- newResp = new(container.ListStreamResponse)
- newResp.SetBody(body)
- }
-
- cut := min(s.cnrAmount, uint32(len(ids)))
-
- body.SetContainerIDs(ids[:cut])
- newResp.SetMetaHeader(resp.GetMetaHeader())
- newResp.SetVerificationHeader(resp.GetVerificationHeader())
-
- if err := s.stream.Send(newResp); err != nil {
- return fmt.Errorf("TransportSplitter: %w", err)
- }
-
- ids = ids[cut:]
-
- if len(ids) == 0 {
- break
- }
- }
-
- return nil
-}
diff --git a/pkg/services/control/common_test.go b/pkg/services/control/common_test.go
new file mode 100644
index 000000000..bc512b4be
--- /dev/null
+++ b/pkg/services/control/common_test.go
@@ -0,0 +1,33 @@
+package control_test
+
+import (
+ "crypto/rand"
+ "testing"
+
+ "github.com/mr-tron/base58"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/proto"
+)
+
+type protoMessage interface {
+ StableMarshal([]byte) []byte
+ proto.Message
+}
+
+func testStableMarshal(t *testing.T, m1, m2 protoMessage, cmp func(m1, m2 protoMessage) bool) {
+ require.NoError(t, proto.Unmarshal(m1.StableMarshal(nil), m2))
+
+ require.True(t, cmp(m1, m2))
+}
+
+func testData(sz int) []byte {
+ d := make([]byte, sz)
+
+ _, _ = rand.Read(d)
+
+ return d
+}
+
+func testString() string {
+ return base58.Encode(testData(10))
+}
diff --git a/pkg/services/control/convert.go b/pkg/services/control/convert.go
index 37daf67be..fd6f020d1 100644
--- a/pkg/services/control/convert.go
+++ b/pkg/services/control/convert.go
@@ -1,8 +1,8 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message"
)
type requestWrapper struct {
diff --git a/pkg/services/control/ir/convert.go b/pkg/services/control/ir/convert.go
index 024676b87..c892c5b6c 100644
--- a/pkg/services/control/ir/convert.go
+++ b/pkg/services/control/ir/convert.go
@@ -1,8 +1,8 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message"
)
type requestWrapper struct {
diff --git a/pkg/services/control/ir/rpc.go b/pkg/services/control/ir/rpc.go
index 62f800d99..1b635c149 100644
--- a/pkg/services/control/ir/rpc.go
+++ b/pkg/services/control/ir/rpc.go
@@ -1,18 +1,17 @@
package control
import (
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc"
)
const serviceName = "ircontrol.ControlService"
const (
- rpcHealthCheck = "HealthCheck"
- rpcTickEpoch = "TickEpoch"
- rpcRemoveNode = "RemoveNode"
- rpcRemoveContainer = "RemoveContainer"
+ rpcHealthCheck = "HealthCheck"
+ rpcTickEpoch = "TickEpoch"
+ rpcRemoveNode = "RemoveNode"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -41,14 +40,6 @@ func RemoveNode(
return sendUnary[RemoveNodeRequest, RemoveNodeResponse](cli, rpcRemoveNode, req, opts...)
}
-func RemoveContainer(
- cli *client.Client,
- req *RemoveContainerRequest,
- opts ...client.CallOption,
-) (*RemoveContainerResponse, error) {
- return sendUnary[RemoveContainerRequest, RemoveContainerResponse](cli, rpcRemoveContainer, req, opts...)
-}
-
func sendUnary[I, O grpc.Message](cli *client.Client, rpcName string, req *I, opts ...client.CallOption) (*O, error) {
var resp O
wResp := &responseWrapper[*O]{
diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go
deleted file mode 100644
index d9f65a2fc..000000000
--- a/pkg/services/control/ir/server/audit.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package control
-
-import (
- "context"
- "encoding/hex"
- "strings"
- "sync/atomic"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
- control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-var _ control.ControlServiceServer = (*auditService)(nil)
-
-type auditService struct {
- next *Server
- log *logger.Logger
- enabled *atomic.Bool
-}
-
-func NewAuditService(next *Server, log *logger.Logger, enabled *atomic.Bool) control.ControlServiceServer {
- return &auditService{
- next: next,
- log: log,
- enabled: enabled,
- }
-}
-
-// HealthCheck implements control.ControlServiceServer.
-func (a *auditService) HealthCheck(ctx context.Context, req *control.HealthCheckRequest) (*control.HealthCheckResponse, error) {
- res, err := a.next.HealthCheck(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
- return res, err
-}
-
-// RemoveContainer implements control.ControlServiceServer.
-func (a *auditService) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
- res, err := a.next.RemoveContainer(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- sb := &strings.Builder{}
- var withConatiner bool
- if len(req.GetBody().GetContainerId()) > 0 {
- withConatiner = true
- sb.WriteString("containerID:")
- var containerID cid.ID
- if err := containerID.Decode(req.GetBody().GetContainerId()); err != nil {
- sb.WriteString(audit.InvalidValue)
- } else {
- sb.WriteString(containerID.EncodeToString())
- }
- }
-
- if len(req.GetBody().GetOwner()) > 0 {
- if withConatiner {
- sb.WriteString(";")
- }
- sb.WriteString("owner:")
-
- var ownerID refs.OwnerID
- if err := ownerID.Unmarshal(req.GetBody().GetOwner()); err != nil {
- sb.WriteString(audit.InvalidValue)
- } else {
- var owner user.ID
- if err := owner.ReadFromV2(ownerID); err != nil {
- sb.WriteString(audit.InvalidValue)
- } else {
- sb.WriteString(owner.EncodeToString())
- }
- }
- }
-
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
- return res, err
-}
-
-// RemoveNode implements control.ControlServiceServer.
-func (a *auditService) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
- res, err := a.next.RemoveNode(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
- audit.TargetFromString(hex.EncodeToString(req.GetBody().GetKey())), err == nil)
- return res, err
-}
-
-// TickEpoch implements control.ControlServiceServer.
-func (a *auditService) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
- res, err := a.next.TickEpoch(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- audit.LogRequestWithKey(ctx, a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
- nil, err == nil)
- return res, err
-}
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
index 0509d2646..680d1e606 100644
--- a/pkg/services/control/ir/server/calls.go
+++ b/pkg/services/control/ir/server/calls.go
@@ -5,12 +5,8 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -40,7 +36,7 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest)
// TickEpoch forces a new epoch.
//
// If request is not signed with a key from white list, permission error returns.
-func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
+func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -48,16 +44,14 @@ func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (
resp := new(control.TickEpochResponse)
resp.SetBody(new(control.TickEpochResponse_Body))
- epoch, err := s.netmapClient.Epoch(ctx)
+ epoch, err := s.netmapClient.Epoch()
if err != nil {
return nil, fmt.Errorf("getting current epoch: %w", err)
}
- vub, err := s.netmapClient.NewEpochControl(ctx, epoch+1, req.GetBody().GetVub())
- if err != nil {
+ if err := s.netmapClient.NewEpoch(epoch+1, true); err != nil {
return nil, fmt.Errorf("forcing new epoch: %w", err)
}
- resp.Body.Vub = vub
if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
@@ -69,7 +63,7 @@ func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (
// RemoveNode forces a node removal.
//
// If request is not signed with a key from white list, permission error returns.
-func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
+func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
@@ -77,7 +71,7 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest)
resp := new(control.RemoveNodeResponse)
resp.SetBody(new(control.RemoveNodeResponse_Body))
- nm, err := s.netmapClient.NetMap(ctx)
+ nm, err := s.netmapClient.NetMap()
if err != nil {
return nil, fmt.Errorf("getting netmap: %w", err)
}
@@ -91,15 +85,13 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest)
if len(nodeInfo.PublicKey()) == 0 {
return nil, status.Error(codes.NotFound, "no such node")
}
- if nodeInfo.Status().IsOffline() {
+ if nodeInfo.IsOffline() {
return nil, status.Error(codes.FailedPrecondition, "node is already offline")
}
- vub, err := s.netmapClient.ForceRemovePeer(ctx, nodeInfo, req.GetBody().GetVub())
- if err != nil {
+ if err := s.netmapClient.ForceRemovePeer(nodeInfo); err != nil {
return nil, fmt.Errorf("forcing node removal: %w", err)
}
- resp.Body.Vub = vub
if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
@@ -107,70 +99,3 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest)
return resp, nil
}
-
-// RemoveContainer forces a container removal.
-func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- if len(req.GetBody().GetContainerId()) > 0 && len(req.GetBody().GetOwner()) > 0 {
- return nil, status.Error(codes.InvalidArgument, "specify the owner and container at the same time is not allowed")
- }
- var vub uint32
- if len(req.GetBody().GetContainerId()) > 0 {
- var containerID cid.ID
- if err := containerID.Decode(req.GetBody().GetContainerId()); err != nil {
- return nil, status.Error(codes.InvalidArgument, "failed to parse container ID: "+err.Error())
- }
- var err error
- vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
- if err != nil {
- return nil, err
- }
- } else {
- var ownerID refs.OwnerID
- if err := ownerID.Unmarshal(req.GetBody().GetOwner()); err != nil {
- return nil, status.Error(codes.InvalidArgument, "failed to parse ownerID: %s"+err.Error())
- }
- var owner user.ID
- if err := owner.ReadFromV2(ownerID); err != nil {
- return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error())
- }
-
- cids, err := s.containerClient.ContainersOf(ctx, &owner)
- if err != nil {
- return nil, fmt.Errorf("failed to get owner's containers: %w", err)
- }
-
- for _, containerID := range cids {
- vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
- if err != nil {
- return nil, err
- }
- }
- }
-
- resp := &control.RemoveContainerResponse{
- Body: &control.RemoveContainerResponse_Body{
- Vub: vub,
- },
- }
- if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func (s *Server) removeContainer(ctx context.Context, containerID cid.ID, vub uint32) (uint32, error) {
- var prm container.DeletePrm
- prm.SetCID(containerID[:])
- prm.SetControlTX(true)
- prm.SetVUB(vub)
-
- vub, err := s.containerClient.Delete(ctx, prm)
- if err != nil {
- return 0, fmt.Errorf("forcing container removal: %w", err)
- }
- return vub, nil
-}
diff --git a/pkg/services/control/ir/server/deps.go b/pkg/services/control/ir/server/deps.go
index 9d5cfefc8..0c2de5300 100644
--- a/pkg/services/control/ir/server/deps.go
+++ b/pkg/services/control/ir/server/deps.go
@@ -5,7 +5,7 @@ import control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // HealthStatus must calculate and return current health status of the IR application.
+ // Must calculate and return current health status of the IR application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go
index 0cfca71c1..dc00809a6 100644
--- a/pkg/services/control/ir/server/server.go
+++ b/pkg/services/control/ir/server/server.go
@@ -3,7 +3,6 @@ package control
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
)
@@ -13,10 +12,10 @@ import (
// To gain access to the service, any request must be
// signed with a key from the white list.
type Server struct {
- prm Prm
- netmapClient *netmap.Client
- containerClient *container.Client
- allowedKeys [][]byte
+ prm Prm
+ netmapClient *netmap.Client
+
+ allowedKeys [][]byte
}
func panicOnPrmValue(n string, v any) {
@@ -33,9 +32,10 @@ func panicOnPrmValue(n string, v any) {
// Forms white list from all keys specified via
// WithAllowedKeys option and a public key of
// the parameterized private key.
-func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server {
+func New(prm Prm, netmapClient *netmap.Client, opts ...Option) *Server {
// verify required parameters
- if prm.healthChecker == nil {
+ switch {
+ case prm.healthChecker == nil:
panicOnPrmValue("health checker", prm.healthChecker)
}
@@ -47,9 +47,8 @@ func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client
}
return &Server{
- prm: prm,
- netmapClient: netmapClient,
- containerClient: containerClient,
+ prm: prm,
+ netmapClient: netmapClient,
allowedKeys: append(o.allowedKeys, prm.key.PublicKey().Bytes()),
}
diff --git a/pkg/services/control/ir/server/sign.go b/pkg/services/control/ir/server/sign.go
index d39f6d5f9..f72d51f9e 100644
--- a/pkg/services/control/ir/server/sign.go
+++ b/pkg/services/control/ir/server/sign.go
@@ -6,8 +6,8 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
)
diff --git a/pkg/services/control/ir/service.go b/pkg/services/control/ir/service.go
new file mode 100644
index 000000000..b2db2b43a
--- /dev/null
+++ b/pkg/services/control/ir/service.go
@@ -0,0 +1,46 @@
+package control
+
+// SetBody sets health check request body.
+func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetHealthStatus sets health status of the IR application.
+func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
+ if x != nil {
+ x.HealthStatus = v
+ }
+}
+
+// SetBody sets health check response body.
+func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+func (x *TickEpochRequest) SetBody(v *TickEpochRequest_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+func (x *TickEpochResponse) SetBody(v *TickEpochResponse_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+func (x *RemoveNodeRequest) SetBody(v *RemoveNodeRequest_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+func (x *RemoveNodeResponse) SetBody(v *RemoveNodeResponse_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
diff --git a/pkg/services/control/ir/service.pb.go b/pkg/services/control/ir/service.pb.go
new file mode 100644
index 000000000..bec74a3be
--- /dev/null
+++ b/pkg/services/control/ir/service.pb.go
@@ -0,0 +1,924 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.4
+// source: pkg/services/control/ir/service.proto
+
+package control
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Health check request.
+type HealthCheckRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of health check request message.
+ Body *HealthCheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ // Should be signed by node key or one of
+ // the keys configured by the node.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *HealthCheckRequest) Reset() {
+ *x = HealthCheckRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckRequest) ProtoMessage() {}
+
+func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *HealthCheckRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Health check response.
+type HealthCheckResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of health check response message.
+ Body *HealthCheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *HealthCheckResponse) Reset() {
+ *x = HealthCheckResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckResponse) ProtoMessage() {}
+
+func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *HealthCheckResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type TickEpochRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *TickEpochRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *TickEpochRequest) Reset() {
+ *x = TickEpochRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TickEpochRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TickEpochRequest) ProtoMessage() {}
+
+func (x *TickEpochRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TickEpochRequest.ProtoReflect.Descriptor instead.
+func (*TickEpochRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *TickEpochRequest) GetBody() *TickEpochRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *TickEpochRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type TickEpochResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *TickEpochResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *TickEpochResponse) Reset() {
+ *x = TickEpochResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TickEpochResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TickEpochResponse) ProtoMessage() {}
+
+func (x *TickEpochResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TickEpochResponse.ProtoReflect.Descriptor instead.
+func (*TickEpochResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *TickEpochResponse) GetBody() *TickEpochResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *TickEpochResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type RemoveNodeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *RemoveNodeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *RemoveNodeRequest) Reset() {
+ *x = RemoveNodeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoveNodeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoveNodeRequest) ProtoMessage() {}
+
+func (x *RemoveNodeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoveNodeRequest.ProtoReflect.Descriptor instead.
+func (*RemoveNodeRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *RemoveNodeRequest) GetBody() *RemoveNodeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *RemoveNodeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type RemoveNodeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *RemoveNodeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *RemoveNodeResponse) Reset() {
+ *x = RemoveNodeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoveNodeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoveNodeResponse) ProtoMessage() {}
+
+func (x *RemoveNodeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoveNodeResponse.ProtoReflect.Descriptor instead.
+func (*RemoveNodeResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *RemoveNodeResponse) GetBody() *RemoveNodeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *RemoveNodeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Health check request body.
+type HealthCheckRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *HealthCheckRequest_Body) Reset() {
+ *x = HealthCheckRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckRequest_Body) ProtoMessage() {}
+
+func (x *HealthCheckRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckRequest_Body.ProtoReflect.Descriptor instead.
+func (*HealthCheckRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Health check response body
+type HealthCheckResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Health status of IR node application.
+ HealthStatus HealthStatus `protobuf:"varint,1,opt,name=health_status,json=healthStatus,proto3,enum=ircontrol.HealthStatus" json:"health_status,omitempty"`
+}
+
+func (x *HealthCheckResponse_Body) Reset() {
+ *x = HealthCheckResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckResponse_Body) ProtoMessage() {}
+
+func (x *HealthCheckResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckResponse_Body.ProtoReflect.Descriptor instead.
+func (*HealthCheckResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
+ if x != nil {
+ return x.HealthStatus
+ }
+ return HealthStatus_HEALTH_STATUS_UNDEFINED
+}
+
+type TickEpochRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *TickEpochRequest_Body) Reset() {
+ *x = TickEpochRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TickEpochRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TickEpochRequest_Body) ProtoMessage() {}
+
+func (x *TickEpochRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TickEpochRequest_Body.ProtoReflect.Descriptor instead.
+func (*TickEpochRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{2, 0}
+}
+
+type TickEpochResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *TickEpochResponse_Body) Reset() {
+ *x = TickEpochResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TickEpochResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TickEpochResponse_Body) ProtoMessage() {}
+
+func (x *TickEpochResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TickEpochResponse_Body.ProtoReflect.Descriptor instead.
+func (*TickEpochResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{3, 0}
+}
+
+type RemoveNodeRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (x *RemoveNodeRequest_Body) Reset() {
+ *x = RemoveNodeRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoveNodeRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoveNodeRequest_Body) ProtoMessage() {}
+
+func (x *RemoveNodeRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoveNodeRequest_Body.ProtoReflect.Descriptor instead.
+func (*RemoveNodeRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *RemoveNodeRequest_Body) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+type RemoveNodeResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *RemoveNodeResponse_Body) Reset() {
+ *x = RemoveNodeResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoveNodeResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoveNodeResponse_Body) ProtoMessage() {}
+
+func (x *RemoveNodeResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_service_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoveNodeResponse_Body.ProtoReflect.Descriptor instead.
+func (*RemoveNodeResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{5, 0}
+}
+
+var File_pkg_services_control_ir_service_proto protoreflect.FileDescriptor
+
+var file_pkg_services_control_ir_service_proto_rawDesc = []byte{
+ 0x0a, 0x25, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x1a, 0x23, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x88, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36,
+ 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69,
+ 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
+ 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
+ 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f,
+ 0x64, 0x79, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
+ 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x44, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
+ 0x3c, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x84, 0x01,
+ 0x0a, 0x10, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x20, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63,
+ 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04,
+ 0x42, 0x6f, 0x64, 0x79, 0x22, 0x86, 0x01, 0x0a, 0x11, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f,
+ 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
+ 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x98, 0x01,
+ 0x0a, 0x11, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65,
+ 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
+ 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x18,
+ 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x88, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x6d,
+ 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64,
+ 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42,
+ 0x6f, 0x64, 0x79, 0x32, 0xf1, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1d, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63,
+ 0x68, 0x12, 0x1b, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69,
+ 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c,
+ 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x45,
+ 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x0a,
+ 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x69, 0x72, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x2e, 0x66,
+ 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73,
+ 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x2f, 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_services_control_ir_service_proto_rawDescOnce sync.Once
+ file_pkg_services_control_ir_service_proto_rawDescData = file_pkg_services_control_ir_service_proto_rawDesc
+)
+
+func file_pkg_services_control_ir_service_proto_rawDescGZIP() []byte {
+ file_pkg_services_control_ir_service_proto_rawDescOnce.Do(func() {
+ file_pkg_services_control_ir_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_ir_service_proto_rawDescData)
+ })
+ return file_pkg_services_control_ir_service_proto_rawDescData
+}
+
+var file_pkg_services_control_ir_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_pkg_services_control_ir_service_proto_goTypes = []interface{}{
+ (*HealthCheckRequest)(nil), // 0: ircontrol.HealthCheckRequest
+ (*HealthCheckResponse)(nil), // 1: ircontrol.HealthCheckResponse
+ (*TickEpochRequest)(nil), // 2: ircontrol.TickEpochRequest
+ (*TickEpochResponse)(nil), // 3: ircontrol.TickEpochResponse
+ (*RemoveNodeRequest)(nil), // 4: ircontrol.RemoveNodeRequest
+ (*RemoveNodeResponse)(nil), // 5: ircontrol.RemoveNodeResponse
+ (*HealthCheckRequest_Body)(nil), // 6: ircontrol.HealthCheckRequest.Body
+ (*HealthCheckResponse_Body)(nil), // 7: ircontrol.HealthCheckResponse.Body
+ (*TickEpochRequest_Body)(nil), // 8: ircontrol.TickEpochRequest.Body
+ (*TickEpochResponse_Body)(nil), // 9: ircontrol.TickEpochResponse.Body
+ (*RemoveNodeRequest_Body)(nil), // 10: ircontrol.RemoveNodeRequest.Body
+ (*RemoveNodeResponse_Body)(nil), // 11: ircontrol.RemoveNodeResponse.Body
+ (*Signature)(nil), // 12: ircontrol.Signature
+ (HealthStatus)(0), // 13: ircontrol.HealthStatus
+}
+var file_pkg_services_control_ir_service_proto_depIdxs = []int32{
+ 6, // 0: ircontrol.HealthCheckRequest.body:type_name -> ircontrol.HealthCheckRequest.Body
+ 12, // 1: ircontrol.HealthCheckRequest.signature:type_name -> ircontrol.Signature
+ 7, // 2: ircontrol.HealthCheckResponse.body:type_name -> ircontrol.HealthCheckResponse.Body
+ 12, // 3: ircontrol.HealthCheckResponse.signature:type_name -> ircontrol.Signature
+ 8, // 4: ircontrol.TickEpochRequest.body:type_name -> ircontrol.TickEpochRequest.Body
+ 12, // 5: ircontrol.TickEpochRequest.signature:type_name -> ircontrol.Signature
+ 9, // 6: ircontrol.TickEpochResponse.body:type_name -> ircontrol.TickEpochResponse.Body
+ 12, // 7: ircontrol.TickEpochResponse.signature:type_name -> ircontrol.Signature
+ 10, // 8: ircontrol.RemoveNodeRequest.body:type_name -> ircontrol.RemoveNodeRequest.Body
+ 12, // 9: ircontrol.RemoveNodeRequest.signature:type_name -> ircontrol.Signature
+ 11, // 10: ircontrol.RemoveNodeResponse.body:type_name -> ircontrol.RemoveNodeResponse.Body
+ 12, // 11: ircontrol.RemoveNodeResponse.signature:type_name -> ircontrol.Signature
+ 13, // 12: ircontrol.HealthCheckResponse.Body.health_status:type_name -> ircontrol.HealthStatus
+ 0, // 13: ircontrol.ControlService.HealthCheck:input_type -> ircontrol.HealthCheckRequest
+ 2, // 14: ircontrol.ControlService.TickEpoch:input_type -> ircontrol.TickEpochRequest
+ 4, // 15: ircontrol.ControlService.RemoveNode:input_type -> ircontrol.RemoveNodeRequest
+ 1, // 16: ircontrol.ControlService.HealthCheck:output_type -> ircontrol.HealthCheckResponse
+ 3, // 17: ircontrol.ControlService.TickEpoch:output_type -> ircontrol.TickEpochResponse
+ 5, // 18: ircontrol.ControlService.RemoveNode:output_type -> ircontrol.RemoveNodeResponse
+ 16, // [16:19] is the sub-list for method output_type
+ 13, // [13:16] is the sub-list for method input_type
+ 13, // [13:13] is the sub-list for extension type_name
+ 13, // [13:13] is the sub-list for extension extendee
+ 0, // [0:13] is the sub-list for field type_name
+}
+
+func init() { file_pkg_services_control_ir_service_proto_init() }
+func file_pkg_services_control_ir_service_proto_init() {
+ if File_pkg_services_control_ir_service_proto != nil {
+ return
+ }
+ file_pkg_services_control_ir_types_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_services_control_ir_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TickEpochRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TickEpochResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoveNodeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoveNodeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TickEpochRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TickEpochResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoveNodeRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_ir_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoveNodeResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_services_control_ir_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_pkg_services_control_ir_service_proto_goTypes,
+ DependencyIndexes: file_pkg_services_control_ir_service_proto_depIdxs,
+ MessageInfos: file_pkg_services_control_ir_service_proto_msgTypes,
+ }.Build()
+ File_pkg_services_control_ir_service_proto = out.File
+ file_pkg_services_control_ir_service_proto_rawDesc = nil
+ file_pkg_services_control_ir_service_proto_goTypes = nil
+ file_pkg_services_control_ir_service_proto_depIdxs = nil
+}
diff --git a/pkg/services/control/ir/service.proto b/pkg/services/control/ir/service.proto
index fa58db568..d647db0df 100644
--- a/pkg/services/control/ir/service.proto
+++ b/pkg/services/control/ir/service.proto
@@ -6,108 +6,72 @@ import "pkg/services/control/ir/types.proto";
option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/ir/control";
-// `ControlService` provides an interface for internal work with the Inner Ring
-// node.
+// `ControlService` provides an interface for internal work with the Inner Ring node.
service ControlService {
- // Performs health check of the IR node.
- rpc HealthCheck(HealthCheckRequest) returns (HealthCheckResponse);
- // Forces a new epoch to be signaled by the IR node with high probability.
- rpc TickEpoch(TickEpochRequest) returns (TickEpochResponse);
- // Forces a node removal to be signaled by the IR node with high probability.
- rpc RemoveNode(RemoveNodeRequest) returns (RemoveNodeResponse);
- // Forces a container removal to be signaled by the IR node with high
- // probability.
- rpc RemoveContainer(RemoveContainerRequest) returns (RemoveContainerResponse);
+ // Performs health check of the IR node.
+ rpc HealthCheck (HealthCheckRequest) returns (HealthCheckResponse);
+ // Forces a new epoch to be signaled by the IR node with high probability.
+ rpc TickEpoch (TickEpochRequest) returns (TickEpochResponse);
+ // Forces a node removal to be signaled by the IR node with high probability.
+ rpc RemoveNode (RemoveNodeRequest) returns (RemoveNodeResponse);
}
// Health check request.
message HealthCheckRequest {
- // Health check request body.
- message Body {}
+ // Health check request body.
+ message Body {
+ }
- // Body of health check request message.
- Body body = 1;
+ // Body of health check request message.
+ Body body = 1;
- // Body signature.
- // Should be signed by node key or one of
- // the keys configured by the node.
- Signature signature = 2;
+ // Body signature.
+ // Should be signed by node key or one of
+ // the keys configured by the node.
+ Signature signature = 2;
}
// Health check response.
message HealthCheckResponse {
- // Health check response body
- message Body {
- // Health status of IR node application.
- HealthStatus health_status = 1;
- }
+ // Health check response body
+ message Body {
+ // Health status of IR node application.
+ HealthStatus health_status = 1;
+ }
- // Body of health check response message.
- Body body = 1;
+ // Body of health check response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
message TickEpochRequest {
- message Body {
- // Valid until block value override.
- uint32 vub = 1;
- }
+ message Body{}
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
message TickEpochResponse {
- message Body {
- // Valid until block value for transaction.
- uint32 vub = 1;
- }
+ message Body{}
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
message RemoveNodeRequest {
- message Body {
- bytes key = 1;
- // Valid until block value override.
- uint32 vub = 2;
- }
+ message Body{
+ bytes key = 1;
+ }
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
message RemoveNodeResponse {
- message Body {
- // Valid until block value for transaction.
- uint32 vub = 1;
- }
+ message Body{}
- Body body = 1;
- Signature signature = 2;
-}
-
-message RemoveContainerRequest {
- message Body {
- bytes container_id = 1;
- bytes owner = 2;
- // Valid until block value override.
- uint32 vub = 3;
- }
-
- Body body = 1;
- Signature signature = 2;
-}
-
-message RemoveContainerResponse {
- message Body {
- // Valid until block value for transaction.
- uint32 vub = 1;
- }
-
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go
index d27746263..f39d3b043 100644
--- a/pkg/services/control/ir/service_frostfs.pb.go
+++ b/pkg/services/control/ir/service_frostfs.pb.go
@@ -2,27 +2,7 @@
package control
-import (
- json "encoding/json"
- fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
- easyproto "github.com/VictoriaMetrics/easyproto"
- jlexer "github.com/mailru/easyjson/jlexer"
- jwriter "github.com/mailru/easyjson/jwriter"
- strconv "strconv"
-)
-
-type HealthCheckRequest_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthCheckRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthCheckRequest_Body)(nil)
- _ json.Marshaler = (*HealthCheckRequest_Body)(nil)
- _ json.Unmarshaler = (*HealthCheckRequest_Body)(nil)
-)
+import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
// StableSize returns the size of x in protobuf format.
//
@@ -34,93 +14,18 @@ func (x *HealthCheckRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthCheckRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthCheckRequest_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *HealthCheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthCheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthCheckRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthCheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthCheckRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthCheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type HealthCheckRequest struct {
- Body *HealthCheckRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthCheckRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthCheckRequest)(nil)
- _ json.Marshaler = (*HealthCheckRequest)(nil)
- _ json.Unmarshaler = (*HealthCheckRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -133,6 +38,27 @@ func (x *HealthCheckRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthCheckRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -148,175 +74,13 @@ func (x *HealthCheckRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthCheckRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *HealthCheckRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthCheckRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(HealthCheckRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
- x.Body = v
-}
-func (x *HealthCheckRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *HealthCheckRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthCheckRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthCheckRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthCheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *HealthCheckRequest_Body
- f = new(HealthCheckRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type HealthCheckResponse_Body struct {
- HealthStatus HealthStatus `json:"healthStatus"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthCheckResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthCheckResponse_Body)(nil)
- _ json.Marshaler = (*HealthCheckResponse_Body)(nil)
- _ json.Unmarshaler = (*HealthCheckResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -328,152 +92,26 @@ func (x *HealthCheckResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthCheckResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *HealthCheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthCheckResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if int32(x.HealthStatus) != 0 {
- mm.AppendInt32(1, int32(x.HealthStatus))
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.EnumMarshal(1, buf[offset:], int32(x.HealthStatus))
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthCheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // HealthStatus
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "HealthStatus")
- }
- x.HealthStatus = HealthStatus(data)
- }
- }
- return nil
-}
-func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
- if x != nil {
- return x.HealthStatus
- }
- return 0
-}
-func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
- x.HealthStatus = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthCheckResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"healthStatus\":"
- out.RawString(prefix)
- v := int32(x.HealthStatus)
- if vv, ok := HealthStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthCheckResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthCheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "healthStatus":
- {
- var f HealthStatus
- var parsedValue HealthStatus
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := HealthStatus_value[v]; ok {
- parsedValue = HealthStatus(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = HealthStatus(vv)
- case float64:
- parsedValue = HealthStatus(v)
- }
- f = parsedValue
- x.HealthStatus = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type HealthCheckResponse struct {
- Body *HealthCheckResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthCheckResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthCheckResponse)(nil)
- _ json.Marshaler = (*HealthCheckResponse)(nil)
- _ json.Unmarshaler = (*HealthCheckResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -486,6 +124,27 @@ func (x *HealthCheckResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthCheckResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -501,175 +160,13 @@ func (x *HealthCheckResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthCheckResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *HealthCheckResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthCheckResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(HealthCheckResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
- x.Body = v
-}
-func (x *HealthCheckResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *HealthCheckResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthCheckResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthCheckResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthCheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *HealthCheckResponse_Body
- f = new(HealthCheckResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type TickEpochRequest_Body struct {
- Vub uint32 `json:"vub"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*TickEpochRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*TickEpochRequest_Body)(nil)
- _ json.Marshaler = (*TickEpochRequest_Body)(nil)
- _ json.Unmarshaler = (*TickEpochRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -677,143 +174,21 @@ func (x *TickEpochRequest_Body) StableSize() (size int) {
if x == nil {
return 0
}
- size += proto.UInt32Size(1, x.Vub)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *TickEpochRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *TickEpochRequest_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *TickEpochRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Vub != 0 {
- mm.AppendUint32(1, x.Vub)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *TickEpochRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "TickEpochRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Vub
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Vub")
- }
- x.Vub = data
- }
- }
- return nil
-}
-func (x *TickEpochRequest_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-func (x *TickEpochRequest_Body) SetVub(v uint32) {
- x.Vub = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *TickEpochRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *TickEpochRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
- out.RawString(prefix)
- out.Uint32(x.Vub)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *TickEpochRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *TickEpochRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "vub":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.Vub = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type TickEpochRequest struct {
- Body *TickEpochRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*TickEpochRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*TickEpochRequest)(nil)
- _ json.Marshaler = (*TickEpochRequest)(nil)
- _ json.Unmarshaler = (*TickEpochRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -826,6 +201,27 @@ func (x *TickEpochRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *TickEpochRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -841,175 +237,13 @@ func (x *TickEpochRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *TickEpochRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *TickEpochRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *TickEpochRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *TickEpochRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *TickEpochRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "TickEpochRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(TickEpochRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *TickEpochRequest) GetBody() *TickEpochRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *TickEpochRequest) SetBody(v *TickEpochRequest_Body) {
- x.Body = v
-}
-func (x *TickEpochRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *TickEpochRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *TickEpochRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *TickEpochRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *TickEpochRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *TickEpochRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *TickEpochRequest_Body
- f = new(TickEpochRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type TickEpochResponse_Body struct {
- Vub uint32 `json:"vub"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*TickEpochResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*TickEpochResponse_Body)(nil)
- _ json.Marshaler = (*TickEpochResponse_Body)(nil)
- _ json.Unmarshaler = (*TickEpochResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1017,143 +251,21 @@ func (x *TickEpochResponse_Body) StableSize() (size int) {
if x == nil {
return 0
}
- size += proto.UInt32Size(1, x.Vub)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *TickEpochResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *TickEpochResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *TickEpochResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Vub != 0 {
- mm.AppendUint32(1, x.Vub)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *TickEpochResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "TickEpochResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Vub
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Vub")
- }
- x.Vub = data
- }
- }
- return nil
-}
-func (x *TickEpochResponse_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-func (x *TickEpochResponse_Body) SetVub(v uint32) {
- x.Vub = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *TickEpochResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *TickEpochResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
- out.RawString(prefix)
- out.Uint32(x.Vub)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *TickEpochResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *TickEpochResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "vub":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.Vub = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type TickEpochResponse struct {
- Body *TickEpochResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*TickEpochResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*TickEpochResponse)(nil)
- _ json.Marshaler = (*TickEpochResponse)(nil)
- _ json.Unmarshaler = (*TickEpochResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1166,6 +278,27 @@ func (x *TickEpochResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *TickEpochResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1181,176 +314,13 @@ func (x *TickEpochResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *TickEpochResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *TickEpochResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *TickEpochResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *TickEpochResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *TickEpochResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "TickEpochResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(TickEpochResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *TickEpochResponse) GetBody() *TickEpochResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *TickEpochResponse) SetBody(v *TickEpochResponse_Body) {
- x.Body = v
-}
-func (x *TickEpochResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *TickEpochResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *TickEpochResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *TickEpochResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *TickEpochResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *TickEpochResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *TickEpochResponse_Body
- f = new(TickEpochResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveNodeRequest_Body struct {
- Key []byte `json:"key"`
- Vub uint32 `json:"vub"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveNodeRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveNodeRequest_Body)(nil)
- _ json.Marshaler = (*RemoveNodeRequest_Body)(nil)
- _ json.Unmarshaler = (*RemoveNodeRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1359,187 +329,29 @@ func (x *RemoveNodeRequest_Body) StableSize() (size int) {
return 0
}
size += proto.BytesSize(1, x.Key)
- size += proto.UInt32Size(2, x.Vub)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveNodeRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveNodeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *RemoveNodeRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.Key) != 0 {
- mm.AppendBytes(1, x.Key)
- }
- if x.Vub != 0 {
- mm.AppendUint32(2, x.Vub)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.Key)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveNodeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveNodeRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Key
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Key")
- }
- x.Key = data
- case 2: // Vub
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Vub")
- }
- x.Vub = data
- }
- }
- return nil
-}
-func (x *RemoveNodeRequest_Body) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-func (x *RemoveNodeRequest_Body) SetKey(v []byte) {
- x.Key = v
-}
-func (x *RemoveNodeRequest_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-func (x *RemoveNodeRequest_Body) SetVub(v uint32) {
- x.Vub = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveNodeRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveNodeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
- if x.Key != nil {
- out.Base64Bytes(x.Key)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
- out.RawString(prefix)
- out.Uint32(x.Vub)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveNodeRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveNodeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "key":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Key = f
- }
- case "vub":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.Vub = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveNodeRequest struct {
- Body *RemoveNodeRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveNodeRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveNodeRequest)(nil)
- _ json.Marshaler = (*RemoveNodeRequest)(nil)
- _ json.Unmarshaler = (*RemoveNodeRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1552,6 +364,27 @@ func (x *RemoveNodeRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *RemoveNodeRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1567,175 +400,13 @@ func (x *RemoveNodeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveNodeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveNodeRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *RemoveNodeRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *RemoveNodeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveNodeRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveNodeRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(RemoveNodeRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveNodeRequest) GetBody() *RemoveNodeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *RemoveNodeRequest) SetBody(v *RemoveNodeRequest_Body) {
- x.Body = v
-}
-func (x *RemoveNodeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *RemoveNodeRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveNodeRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveNodeRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveNodeRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveNodeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *RemoveNodeRequest_Body
- f = new(RemoveNodeRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveNodeResponse_Body struct {
- Vub uint32 `json:"vub"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveNodeResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveNodeResponse_Body)(nil)
- _ json.Marshaler = (*RemoveNodeResponse_Body)(nil)
- _ json.Unmarshaler = (*RemoveNodeResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1743,143 +414,21 @@ func (x *RemoveNodeResponse_Body) StableSize() (size int) {
if x == nil {
return 0
}
- size += proto.UInt32Size(1, x.Vub)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveNodeResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *RemoveNodeResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *RemoveNodeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Vub != 0 {
- mm.AppendUint32(1, x.Vub)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveNodeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveNodeResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Vub
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Vub")
- }
- x.Vub = data
- }
- }
- return nil
-}
-func (x *RemoveNodeResponse_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-func (x *RemoveNodeResponse_Body) SetVub(v uint32) {
- x.Vub = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveNodeResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveNodeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
- out.RawString(prefix)
- out.Uint32(x.Vub)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveNodeResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveNodeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "vub":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.Vub = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveNodeResponse struct {
- Body *RemoveNodeResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveNodeResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveNodeResponse)(nil)
- _ json.Marshaler = (*RemoveNodeResponse)(nil)
- _ json.Unmarshaler = (*RemoveNodeResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1892,6 +441,27 @@ func (x *RemoveNodeResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *RemoveNodeResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1907,932 +477,9 @@ func (x *RemoveNodeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveNodeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveNodeResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveNodeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveNodeResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveNodeResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(RemoveNodeResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveNodeResponse) GetBody() *RemoveNodeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *RemoveNodeResponse) SetBody(v *RemoveNodeResponse_Body) {
- x.Body = v
-}
-func (x *RemoveNodeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *RemoveNodeResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveNodeResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveNodeResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveNodeResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveNodeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *RemoveNodeResponse_Body
- f = new(RemoveNodeResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveContainerRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- Owner []byte `json:"owner"`
- Vub uint32 `json:"vub"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveContainerRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveContainerRequest_Body)(nil)
- _ json.Marshaler = (*RemoveContainerRequest_Body)(nil)
- _ json.Unmarshaler = (*RemoveContainerRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveContainerRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.BytesSize(1, x.ContainerId)
- size += proto.BytesSize(2, x.Owner)
- size += proto.UInt32Size(3, x.Vub)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveContainerRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveContainerRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
- }
- if len(x.Owner) != 0 {
- mm.AppendBytes(2, x.Owner)
- }
- if x.Vub != 0 {
- mm.AppendUint32(3, x.Vub)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveContainerRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveContainerRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- case 2: // Owner
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Owner")
- }
- x.Owner = data
- case 3: // Vub
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Vub")
- }
- x.Vub = data
- }
- }
- return nil
-}
-func (x *RemoveContainerRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *RemoveContainerRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-func (x *RemoveContainerRequest_Body) GetOwner() []byte {
- if x != nil {
- return x.Owner
- }
- return nil
-}
-func (x *RemoveContainerRequest_Body) SetOwner(v []byte) {
- x.Owner = v
-}
-func (x *RemoveContainerRequest_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-func (x *RemoveContainerRequest_Body) SetVub(v uint32) {
- x.Vub = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveContainerRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveContainerRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"owner\":"
- out.RawString(prefix)
- if x.Owner != nil {
- out.Base64Bytes(x.Owner)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
- out.RawString(prefix)
- out.Uint32(x.Vub)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveContainerRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveContainerRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- case "owner":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Owner = f
- }
- case "vub":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.Vub = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveContainerRequest struct {
- Body *RemoveContainerRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveContainerRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveContainerRequest)(nil)
- _ json.Marshaler = (*RemoveContainerRequest)(nil)
- _ json.Unmarshaler = (*RemoveContainerRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveContainerRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveContainerRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveContainerRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveContainerRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveContainerRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveContainerRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveContainerRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(RemoveContainerRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveContainerRequest) GetBody() *RemoveContainerRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *RemoveContainerRequest) SetBody(v *RemoveContainerRequest_Body) {
- x.Body = v
-}
-func (x *RemoveContainerRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *RemoveContainerRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveContainerRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveContainerRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveContainerRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveContainerRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *RemoveContainerRequest_Body
- f = new(RemoveContainerRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveContainerResponse_Body struct {
- Vub uint32 `json:"vub"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveContainerResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveContainerResponse_Body)(nil)
- _ json.Marshaler = (*RemoveContainerResponse_Body)(nil)
- _ json.Unmarshaler = (*RemoveContainerResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveContainerResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.UInt32Size(1, x.Vub)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveContainerResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveContainerResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Vub != 0 {
- mm.AppendUint32(1, x.Vub)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveContainerResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveContainerResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Vub
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Vub")
- }
- x.Vub = data
- }
- }
- return nil
-}
-func (x *RemoveContainerResponse_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-func (x *RemoveContainerResponse_Body) SetVub(v uint32) {
- x.Vub = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveContainerResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveContainerResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"vub\":"
- out.RawString(prefix)
- out.Uint32(x.Vub)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveContainerResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveContainerResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "vub":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.Vub = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveContainerResponse struct {
- Body *RemoveContainerResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveContainerResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveContainerResponse)(nil)
- _ json.Marshaler = (*RemoveContainerResponse)(nil)
- _ json.Unmarshaler = (*RemoveContainerResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveContainerResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveContainerResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveContainerResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveContainerResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveContainerResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveContainerResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveContainerResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(RemoveContainerResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveContainerResponse) GetBody() *RemoveContainerResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *RemoveContainerResponse) SetBody(v *RemoveContainerResponse_Body) {
- x.Body = v
-}
-func (x *RemoveContainerResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *RemoveContainerResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveContainerResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveContainerResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveContainerResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveContainerResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *RemoveContainerResponse_Body
- f = new(RemoveContainerResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
+func (x *RemoveNodeResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
diff --git a/pkg/services/control/ir/service_grpc.pb.go b/pkg/services/control/ir/service_grpc.pb.go
index 336bf5f70..6ba214da0 100644
--- a/pkg/services/control/ir/service_grpc.pb.go
+++ b/pkg/services/control/ir/service_grpc.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.25.0
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.12.4
// source: pkg/services/control/ir/service.proto
package control
@@ -18,13 +18,6 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
-const (
- ControlService_HealthCheck_FullMethodName = "/ircontrol.ControlService/HealthCheck"
- ControlService_TickEpoch_FullMethodName = "/ircontrol.ControlService/TickEpoch"
- ControlService_RemoveNode_FullMethodName = "/ircontrol.ControlService/RemoveNode"
- ControlService_RemoveContainer_FullMethodName = "/ircontrol.ControlService/RemoveContainer"
-)
-
// ControlServiceClient is the client API for ControlService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
@@ -35,9 +28,6 @@ type ControlServiceClient interface {
TickEpoch(ctx context.Context, in *TickEpochRequest, opts ...grpc.CallOption) (*TickEpochResponse, error)
// Forces a node removal to be signaled by the IR node with high probability.
RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error)
- // Forces a container removal to be signaled by the IR node with high
- // probability.
- RemoveContainer(ctx context.Context, in *RemoveContainerRequest, opts ...grpc.CallOption) (*RemoveContainerResponse, error)
}
type controlServiceClient struct {
@@ -50,7 +40,7 @@ func NewControlServiceClient(cc grpc.ClientConnInterface) ControlServiceClient {
func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
out := new(HealthCheckResponse)
- err := c.cc.Invoke(ctx, ControlService_HealthCheck_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ircontrol.ControlService/HealthCheck", in, out, opts...)
if err != nil {
return nil, err
}
@@ -59,7 +49,7 @@ func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckR
func (c *controlServiceClient) TickEpoch(ctx context.Context, in *TickEpochRequest, opts ...grpc.CallOption) (*TickEpochResponse, error) {
out := new(TickEpochResponse)
- err := c.cc.Invoke(ctx, ControlService_TickEpoch_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ircontrol.ControlService/TickEpoch", in, out, opts...)
if err != nil {
return nil, err
}
@@ -68,16 +58,7 @@ func (c *controlServiceClient) TickEpoch(ctx context.Context, in *TickEpochReque
func (c *controlServiceClient) RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) {
out := new(RemoveNodeResponse)
- err := c.cc.Invoke(ctx, ControlService_RemoveNode_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) RemoveContainer(ctx context.Context, in *RemoveContainerRequest, opts ...grpc.CallOption) (*RemoveContainerResponse, error) {
- out := new(RemoveContainerResponse)
- err := c.cc.Invoke(ctx, ControlService_RemoveContainer_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ircontrol.ControlService/RemoveNode", in, out, opts...)
if err != nil {
return nil, err
}
@@ -94,9 +75,6 @@ type ControlServiceServer interface {
TickEpoch(context.Context, *TickEpochRequest) (*TickEpochResponse, error)
// Forces a node removal to be signaled by the IR node with high probability.
RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error)
- // Forces a container removal to be signaled by the IR node with high
- // probability.
- RemoveContainer(context.Context, *RemoveContainerRequest) (*RemoveContainerResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -112,9 +90,6 @@ func (UnimplementedControlServiceServer) TickEpoch(context.Context, *TickEpochRe
func (UnimplementedControlServiceServer) RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RemoveNode not implemented")
}
-func (UnimplementedControlServiceServer) RemoveContainer(context.Context, *RemoveContainerRequest) (*RemoveContainerResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RemoveContainer not implemented")
-}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -137,7 +112,7 @@ func _ControlService_HealthCheck_Handler(srv interface{}, ctx context.Context, d
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: ControlService_HealthCheck_FullMethodName,
+ FullMethod: "/ircontrol.ControlService/HealthCheck",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).HealthCheck(ctx, req.(*HealthCheckRequest))
@@ -155,7 +130,7 @@ func _ControlService_TickEpoch_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: ControlService_TickEpoch_FullMethodName,
+ FullMethod: "/ircontrol.ControlService/TickEpoch",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).TickEpoch(ctx, req.(*TickEpochRequest))
@@ -173,7 +148,7 @@ func _ControlService_RemoveNode_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: ControlService_RemoveNode_FullMethodName,
+ FullMethod: "/ircontrol.ControlService/RemoveNode",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).RemoveNode(ctx, req.(*RemoveNodeRequest))
@@ -181,24 +156,6 @@ func _ControlService_RemoveNode_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
-func _ControlService_RemoveContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RemoveContainerRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).RemoveContainer(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_RemoveContainer_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).RemoveContainer(ctx, req.(*RemoveContainerRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -218,10 +175,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "RemoveNode",
Handler: _ControlService_RemoveNode_Handler,
},
- {
- MethodName: "RemoveContainer",
- Handler: _ControlService_RemoveContainer_Handler,
- },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/ir/service.proto",
diff --git a/pkg/services/control/ir/service_test.go b/pkg/services/control/ir/service_test.go
new file mode 100644
index 000000000..54eef5148
--- /dev/null
+++ b/pkg/services/control/ir/service_test.go
@@ -0,0 +1,44 @@
+package control_test
+
+import (
+ "testing"
+
+ control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/proto"
+)
+
+type protoMessage interface {
+ StableMarshal([]byte) []byte
+ proto.Message
+}
+
+func testStableMarshal(t *testing.T, m1, m2 protoMessage, cmp func(m1, m2 protoMessage) bool) {
+ require.NoError(t, proto.Unmarshal(m1.StableMarshal(nil), m2))
+
+ require.True(t, cmp(m1, m2))
+}
+
+func TestHealthCheckResponse_Body_StableMarshal(t *testing.T) {
+ testStableMarshal(t,
+ generateHealthCheckResponseBody(),
+ new(control.HealthCheckResponse_Body),
+ func(m1, m2 protoMessage) bool {
+ return equalHealthCheckResponseBodies(
+ m1.(*control.HealthCheckResponse_Body),
+ m2.(*control.HealthCheckResponse_Body),
+ )
+ },
+ )
+}
+
+func generateHealthCheckResponseBody() *control.HealthCheckResponse_Body {
+ body := new(control.HealthCheckResponse_Body)
+ body.SetHealthStatus(control.HealthStatus_SHUTTING_DOWN)
+
+ return body
+}
+
+func equalHealthCheckResponseBodies(b1, b2 *control.HealthCheckResponse_Body) bool {
+ return b1.GetHealthStatus() == b2.GetHealthStatus()
+}
diff --git a/pkg/services/control/ir/types.go b/pkg/services/control/ir/types.go
new file mode 100644
index 000000000..97ffd3ce3
--- /dev/null
+++ b/pkg/services/control/ir/types.go
@@ -0,0 +1,15 @@
+package control
+
+// SetKey sets public key used for signing.
+func (x *Signature) SetKey(v []byte) {
+ if x != nil {
+ x.Key = v
+ }
+}
+
+// SetSign sets binary signature.
+func (x *Signature) SetSign(v []byte) {
+ if x != nil {
+ x.Sign = v
+ }
+}
diff --git a/pkg/services/control/ir/types.pb.go b/pkg/services/control/ir/types.pb.go
new file mode 100644
index 000000000..8107b917e
--- /dev/null
+++ b/pkg/services/control/ir/types.pb.go
@@ -0,0 +1,224 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.12.4
+// source: pkg/services/control/ir/types.proto
+
+package control
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Health status of the IR application.
+type HealthStatus int32
+
+const (
+ // Undefined status, default value.
+ HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
+ // IR application is starting.
+ HealthStatus_STARTING HealthStatus = 1
+ // IR application is started and serves all services.
+ HealthStatus_READY HealthStatus = 2
+ // IR application is shutting down.
+ HealthStatus_SHUTTING_DOWN HealthStatus = 3
+)
+
+// Enum value maps for HealthStatus.
+var (
+ HealthStatus_name = map[int32]string{
+ 0: "HEALTH_STATUS_UNDEFINED",
+ 1: "STARTING",
+ 2: "READY",
+ 3: "SHUTTING_DOWN",
+ }
+ HealthStatus_value = map[string]int32{
+ "HEALTH_STATUS_UNDEFINED": 0,
+ "STARTING": 1,
+ "READY": 2,
+ "SHUTTING_DOWN": 3,
+ }
+)
+
+func (x HealthStatus) Enum() *HealthStatus {
+ p := new(HealthStatus)
+ *p = x
+ return p
+}
+
+func (x HealthStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HealthStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_pkg_services_control_ir_types_proto_enumTypes[0].Descriptor()
+}
+
+func (HealthStatus) Type() protoreflect.EnumType {
+ return &file_pkg_services_control_ir_types_proto_enumTypes[0]
+}
+
+func (x HealthStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HealthStatus.Descriptor instead.
+func (HealthStatus) EnumDescriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_types_proto_rawDescGZIP(), []int{0}
+}
+
+// Signature of some message.
+type Signature struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Public key used for signing.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // Binary signature.
+ Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"`
+}
+
+func (x *Signature) Reset() {
+ *x = Signature{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_ir_types_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Signature) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Signature) ProtoMessage() {}
+
+func (x *Signature) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_ir_types_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
+func (*Signature) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_ir_types_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Signature) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *Signature) GetSign() []byte {
+ if x != nil {
+ return x.Sign
+ }
+ return nil
+}
+
+var File_pkg_services_control_ir_types_proto protoreflect.FileDescriptor
+
+var file_pkg_services_control_ir_types_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x22, 0x36, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2a, 0x57, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, 0x48, 0x45, 0x41, 0x4c,
+ 0x54, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49,
+ 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e,
+ 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x12, 0x11,
+ 0x0a, 0x0d, 0x53, 0x48, 0x55, 0x54, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x10,
+ 0x03, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73,
+ 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c,
+ 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
+ 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x72, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_services_control_ir_types_proto_rawDescOnce sync.Once
+ file_pkg_services_control_ir_types_proto_rawDescData = file_pkg_services_control_ir_types_proto_rawDesc
+)
+
+func file_pkg_services_control_ir_types_proto_rawDescGZIP() []byte {
+ file_pkg_services_control_ir_types_proto_rawDescOnce.Do(func() {
+ file_pkg_services_control_ir_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_ir_types_proto_rawDescData)
+ })
+ return file_pkg_services_control_ir_types_proto_rawDescData
+}
+
+var file_pkg_services_control_ir_types_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_pkg_services_control_ir_types_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_pkg_services_control_ir_types_proto_goTypes = []interface{}{
+ (HealthStatus)(0), // 0: ircontrol.HealthStatus
+ (*Signature)(nil), // 1: ircontrol.Signature
+}
+var file_pkg_services_control_ir_types_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_pkg_services_control_ir_types_proto_init() }
+func file_pkg_services_control_ir_types_proto_init() {
+ if File_pkg_services_control_ir_types_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_services_control_ir_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Signature); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_services_control_ir_types_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pkg_services_control_ir_types_proto_goTypes,
+ DependencyIndexes: file_pkg_services_control_ir_types_proto_depIdxs,
+ EnumInfos: file_pkg_services_control_ir_types_proto_enumTypes,
+ MessageInfos: file_pkg_services_control_ir_types_proto_msgTypes,
+ }.Build()
+ File_pkg_services_control_ir_types_proto = out.File
+ file_pkg_services_control_ir_types_proto_rawDesc = nil
+ file_pkg_services_control_ir_types_proto_goTypes = nil
+ file_pkg_services_control_ir_types_proto_depIdxs = nil
+}
diff --git a/pkg/services/control/ir/types.proto b/pkg/services/control/ir/types.proto
index 901a55918..a6897fad1 100644
--- a/pkg/services/control/ir/types.proto
+++ b/pkg/services/control/ir/types.proto
@@ -6,27 +6,24 @@ option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/ir/
// Signature of some message.
message Signature {
- // Public key used for signing.
- bytes key = 1 [ json_name = "key" ];
+ // Public key used for signing.
+ bytes key = 1 [json_name = "key"];
- // Binary signature.
- bytes sign = 2 [ json_name = "signature" ];
+ // Binary signature.
+ bytes sign = 2 [json_name = "signature"];
}
// Health status of the IR application.
enum HealthStatus {
- // Undefined status, default value.
- HEALTH_STATUS_UNDEFINED = 0;
+ // Undefined status, default value.
+ HEALTH_STATUS_UNDEFINED = 0;
- // IR application is starting.
- STARTING = 1;
+ // IR application is starting.
+ STARTING = 1;
- // IR application is started and serves all services.
- READY = 2;
+ // IR application is started and serves all services.
+ READY = 2;
- // IR application is shutting down.
- SHUTTING_DOWN = 3;
-
- // IR application is reconfiguring.
- RECONFIGURING = 4;
+ // IR application is shutting down.
+ SHUTTING_DOWN = 3;
}
diff --git a/pkg/services/control/ir/types_frostfs.pb.go b/pkg/services/control/ir/types_frostfs.pb.go
index 407eec6ad..ef2fc458e 100644
--- a/pkg/services/control/ir/types_frostfs.pb.go
+++ b/pkg/services/control/ir/types_frostfs.pb.go
@@ -2,70 +2,7 @@
package control
-import (
- json "encoding/json"
- fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
- easyproto "github.com/VictoriaMetrics/easyproto"
- jlexer "github.com/mailru/easyjson/jlexer"
- jwriter "github.com/mailru/easyjson/jwriter"
- strconv "strconv"
-)
-
-type HealthStatus int32
-
-const (
- HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
- HealthStatus_STARTING HealthStatus = 1
- HealthStatus_READY HealthStatus = 2
- HealthStatus_SHUTTING_DOWN HealthStatus = 3
- HealthStatus_RECONFIGURING HealthStatus = 4
-)
-
-var (
- HealthStatus_name = map[int32]string{
- 0: "HEALTH_STATUS_UNDEFINED",
- 1: "STARTING",
- 2: "READY",
- 3: "SHUTTING_DOWN",
- 4: "RECONFIGURING",
- }
- HealthStatus_value = map[string]int32{
- "HEALTH_STATUS_UNDEFINED": 0,
- "STARTING": 1,
- "READY": 2,
- "SHUTTING_DOWN": 3,
- "RECONFIGURING": 4,
- }
-)
-
-func (x HealthStatus) String() string {
- if v, ok := HealthStatus_name[int32(x)]; ok {
- return v
- }
- return strconv.FormatInt(int64(x), 10)
-}
-func (x *HealthStatus) FromString(s string) bool {
- if v, ok := HealthStatus_value[s]; ok {
- *x = HealthStatus(v)
- return true
- }
- return false
-}
-
-type Signature struct {
- Key []byte `json:"key"`
- Sign []byte `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*Signature)(nil)
- _ encoding.ProtoUnmarshaler = (*Signature)(nil)
- _ json.Marshaler = (*Signature)(nil)
- _ json.Unmarshaler = (*Signature)(nil)
-)
+import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
// StableSize returns the size of x in protobuf format.
//
@@ -79,169 +16,23 @@ func (x *Signature) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *Signature) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *Signature) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.Key) != 0 {
- mm.AppendBytes(1, x.Key)
- }
- if len(x.Sign) != 0 {
- mm.AppendBytes(2, x.Sign)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *Signature) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "Signature")
- }
- switch fc.FieldNum {
- case 1: // Key
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Key")
- }
- x.Key = data
- case 2: // Sign
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Sign")
- }
- x.Sign = data
- }
- }
- return nil
-}
-func (x *Signature) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-func (x *Signature) SetKey(v []byte) {
- x.Key = v
-}
-func (x *Signature) GetSign() []byte {
- if x != nil {
- return x.Sign
- }
- return nil
-}
-func (x *Signature) SetSign(v []byte) {
- x.Sign = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *Signature) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
- if x.Key != nil {
- out.Base64Bytes(x.Key)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- if x.Sign != nil {
- out.Base64Bytes(x.Sign)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *Signature) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "key":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Key = f
- }
- case "signature":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Sign = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.Key)
+ offset += proto.BytesMarshal(2, buf[offset:], x.Sign)
+ return buf
}
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 0c4236d0e..a2e7c411a 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -1,38 +1,25 @@
package control
import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common"
)
const serviceName = "control.ControlService"
const (
- rpcHealthCheck = "HealthCheck"
- rpcSetNetmapStatus = "SetNetmapStatus"
- rpcGetNetmapStatus = "GetNetmapStatus"
- rpcDropObjects = "DropObjects"
- rpcListShards = "ListShards"
- rpcSetShardMode = "SetShardMode"
- rpcSynchronizeTree = "SynchronizeTree"
- rpcStartShardEvacuation = "StartShardEvacuation"
- rpcGetShardEvacuationStatus = "GetShardEvacuationStatus"
- rpcResetShardEvacuationStatus = "ResetShardEvacuationStatus"
- rpcStopShardEvacuation = "StopShardEvacuation"
- rpcFlushCache = "FlushCache"
- rpcDoctor = "Doctor"
- rpcAddChainLocalOverride = "AddChainLocalOverride"
- rpcGetChainLocalOverride = "GetChainLocalOverride"
- rpcListChainLocalOverrides = "ListChainLocalOverrides"
- rpcRemoveChainLocalOverride = "RemoveChainLocalOverride"
- rpcRemoveChainLocalOverridesByTarget = "RemoveChainLocalOverridesByTarget"
- rpcSealWriteCache = "SealWriteCache"
- rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides"
- rpcDetachShards = "DetachShards"
- rpcStartShardRebuild = "StartShardRebuild"
- rpcListShardsForObject = "ListShardsForObject"
+ rpcHealthCheck = "HealthCheck"
+ rpcSetNetmapStatus = "SetNetmapStatus"
+ rpcDropObjects = "DropObjects"
+ rpcListShards = "ListShards"
+ rpcSetShardMode = "SetShardMode"
+ rpcSynchronizeTree = "SynchronizeTree"
+ rpcEvacuateShard = "EvacuateShard"
+ rpcStartShardEvacuation = "StartShardEvacuation"
+ rpcGetShardEvacuationStatus = "GetShardEvacuationStatus"
+ rpcStopShardEvacuation = "StopShardEvacuation"
+ rpcFlushCache = "FlushCache"
+ rpcDoctor = "Doctor"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -74,27 +61,6 @@ func SetNetmapStatus(
return wResp.message, nil
}
-// GetNetmapStatus executes ControlService.GetNetmapStatus RPC.
-func GetNetmapStatus(
- _ context.Context,
- cli *client.Client,
- req *GetNetmapStatusRequest,
- opts ...client.CallOption,
-) (*GetNetmapStatusResponse, error) {
- wResp := newResponseWrapper[GetNetmapStatusResponse]()
-
- wReq := &requestWrapper{
- m: req,
- }
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcGetNetmapStatus), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
// DropObjects executes ControlService.DropObjects RPC.
func DropObjects(
cli *client.Client,
@@ -165,6 +131,19 @@ func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...cl
return wResp.message, nil
}
+// EvacuateShard executes ControlService.EvacuateShard RPC.
+func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client.CallOption) (*EvacuateShardResponse, error) {
+ wResp := newResponseWrapper[EvacuateShardResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcEvacuateShard), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
+
// StartShardEvacuation executes ControlService.StartShardEvacuation RPC.
func StartShardEvacuation(cli *client.Client, req *StartShardEvacuationRequest, opts ...client.CallOption) (*StartShardEvacuationResponse, error) {
wResp := newResponseWrapper[StartShardEvacuationResponse]()
@@ -204,19 +183,6 @@ func StopShardEvacuation(cli *client.Client, req *StopShardEvacuationRequest, op
return wResp.message, nil
}
-// ResetShardEvacuationStatus executes ControlService.ResetShardEvacuationStatus RPC.
-func ResetShardEvacuationStatus(cli *client.Client, req *ResetShardEvacuationStatusRequest, opts ...client.CallOption) (*ResetShardEvacuationStatusResponse, error) {
- wResp := newResponseWrapper[ResetShardEvacuationStatusResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcResetShardEvacuationStatus), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
// FlushCache executes ControlService.FlushCache RPC.
func FlushCache(cli *client.Client, req *FlushCacheRequest, opts ...client.CallOption) (*FlushCacheResponse, error) {
wResp := newResponseWrapper[FlushCacheResponse]()
@@ -242,145 +208,3 @@ func Doctor(cli *client.Client, req *DoctorRequest, opts ...client.CallOption) (
return wResp.message, nil
}
-
-// AddChainLocalOverride executes ControlService.AddChainLocalOverride RPC.
-func AddChainLocalOverride(cli *client.Client, req *AddChainLocalOverrideRequest, opts ...client.CallOption) (*AddChainLocalOverrideResponse, error) {
- wResp := newResponseWrapper[AddChainLocalOverrideResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcAddChainLocalOverride), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
-// ListChainLocalOverrides executes ControlService.ListChainLocalOverrides RPC.
-func ListChainLocalOverrides(cli *client.Client, req *ListChainLocalOverridesRequest, opts ...client.CallOption) (*ListChainLocalOverridesResponse, error) {
- wResp := newResponseWrapper[ListChainLocalOverridesResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListChainLocalOverrides), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
-// ListTargetsLocalOverrides executes ControlService.ListTargetsLocalOverrides RPC.
-func ListTargetsLocalOverrides(cli *client.Client, req *ListTargetsLocalOverridesRequest, opts ...client.CallOption) (*ListTargetsLocalOverridesResponse, error) {
- wResp := newResponseWrapper[ListTargetsLocalOverridesResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListTargetsLocalOverrides), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
-// GetChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC.
-func GetChainLocalOverride(cli *client.Client, req *GetChainLocalOverrideRequest, opts ...client.CallOption) (*GetChainLocalOverrideResponse, error) {
- wResp := newResponseWrapper[GetChainLocalOverrideResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcGetChainLocalOverride), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
-// RemoveChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC.
-func RemoveChainLocalOverride(cli *client.Client, req *RemoveChainLocalOverrideRequest, opts ...client.CallOption) (*RemoveChainLocalOverrideResponse, error) {
- wResp := newResponseWrapper[RemoveChainLocalOverrideResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcRemoveChainLocalOverride), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
-// RemoveChainLocalOverridesByTarget executes ControlService.RemoveChainLocalOverridesByTarget RPC.
-func RemoveChainLocalOverridesByTarget(cli *client.Client, req *RemoveChainLocalOverridesByTargetRequest, opts ...client.CallOption) (*RemoveChainLocalOverridesByTargetResponse, error) {
- wResp := newResponseWrapper[RemoveChainLocalOverridesByTargetResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcRemoveChainLocalOverridesByTarget), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
-// SealWriteCache executes ControlService.SealWriteCache RPC.
-func SealWriteCache(cli *client.Client, req *SealWriteCacheRequest, opts ...client.CallOption) (*SealWriteCacheResponse, error) {
- wResp := newResponseWrapper[SealWriteCacheResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcSealWriteCache), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
-// DetachShards executes ControlService.DetachShards RPC.
-func DetachShards(
- cli *client.Client,
- req *DetachShardsRequest,
- opts ...client.CallOption,
-) (*DetachShardsResponse, error) {
- wResp := newResponseWrapper[DetachShardsResponse]()
-
- wReq := &requestWrapper{
- m: req,
- }
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDetachShards), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
-// StartShardRebuild executes ControlService.StartShardRebuild RPC.
-func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts ...client.CallOption) (*StartShardRebuildResponse, error) {
- wResp := newResponseWrapper[StartShardRebuildResponse]()
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcStartShardRebuild), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
-
-// ListShardsForObject executes ControlService.ListShardsForObject RPC.
-func ListShardsForObject(
- cli *client.Client,
- req *ListShardsForObjectRequest,
- opts ...client.CallOption,
-) (*ListShardsForObjectResponse, error) {
- wResp := newResponseWrapper[ListShardsForObjectResponse]()
-
- wReq := &requestWrapper{
- m: req,
- }
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.message, nil
-}
diff --git a/pkg/services/control/server/ape/validate.go b/pkg/services/control/server/ape/validate.go
deleted file mode 100644
index f4aa0399f..000000000
--- a/pkg/services/control/server/ape/validate.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package ape
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
-)
-
-var (
- ErrInvalidResource = errors.New("invalid resource name")
- ErrUnsupportedPrefix = errors.New("unsupported resource name prefix")
- ErrInvalidContainerID = errors.New("invalid container id")
- ErrInvalidObjectID = errors.New("invalid object id")
- ErrInvalidNamespace = fmt.Errorf("namespace must match regexp: %s", ape.NamespaceNameRegexp.String())
-)
-
-// ValidateResourceName validates resource name components - container and object id, namespace.
-// Also validates matching resource name to templates of policy engine's native scheme.
-func ValidateResourceName(name string) error {
- if after, found := strings.CutPrefix(name, native.ObjectPrefix+"/"); found {
- return validateObjectResourceName(after)
- } else if after, found = strings.CutPrefix(name, native.ContainerPrefix+"/"); found {
- return validateContainerResourceName(after)
- }
- return ErrUnsupportedPrefix
-}
-
-// validateObjectResourceName validate name for object.
-// Name should be without prefix `native.ObjectPrefix`.
-func validateObjectResourceName(name string) error {
- if name == "*" {
- return nil
- }
- lexems := strings.Split(name, "/")
- if len(lexems) == 1 && lexems[0] == "*" {
- return nil
- } else if len(lexems) == 2 {
- // len == 2 means format `namespace(root_namespace)/*`
- if lexems[0] != "" && !ape.NamespaceNameRegexp.MatchString(lexems[0]) {
- return ErrInvalidNamespace
- }
- if lexems[1] == "*" {
- return nil
- }
- } else if len(lexems) == 3 {
- // len == 3 means format `namespace(root_namespace)/CID/OID(*)`
- if lexems[0] != "" && !ape.NamespaceNameRegexp.MatchString(lexems[0]) {
- return ErrInvalidNamespace
- }
- var cnr cid.ID
- err := cnr.DecodeString(lexems[1])
- if err != nil {
- return fmt.Errorf("%w: %w", ErrInvalidContainerID, err)
- }
- if lexems[2] == "*" {
- return nil
- }
- var objID oid.ID
- err = objID.DecodeString(lexems[2])
- if err != nil {
- return fmt.Errorf("%w: %w", ErrInvalidObjectID, err)
- }
- return nil
- }
- return ErrInvalidResource
-}
-
-// validateContainerResourceName validate resource name for container.
-// Name should be without prefix `native.ContainerPrefix`.
-func validateContainerResourceName(name string) error {
- if name == "*" {
- return nil
- }
- lexems := strings.Split(name, "/")
- if len(lexems) == 1 && lexems[0] == "*" {
- return nil
- } else if len(lexems) == 2 {
- // len == 2 means format `namespace(root_namespace)/CID(*)`
- if lexems[0] != "" && !ape.NamespaceNameRegexp.MatchString(lexems[0]) {
- return ErrInvalidNamespace
- }
- if lexems[1] != "*" {
- var cnr cid.ID
- err := cnr.DecodeString(lexems[1])
- if err != nil {
- return fmt.Errorf("%w: %w", ErrInvalidContainerID, err)
- }
- }
- return nil
- }
- return ErrInvalidResource
-}
diff --git a/pkg/services/control/server/ape/validate_test.go b/pkg/services/control/server/ape/validate_test.go
deleted file mode 100644
index af811efed..000000000
--- a/pkg/services/control/server/ape/validate_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package ape
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/stretchr/testify/require"
-)
-
-func TestValidationOfChainResources(t *testing.T) {
- tests := [...]struct {
- testName string
- resourceName string
- expectErr error
- }{
- {
- testName: "native object: all objects",
- resourceName: native.ObjectPrefix + "/*",
- },
- {
- testName: "native object: all objects in namespace",
- resourceName: native.ObjectPrefix + "/ns/*",
- },
- {
- testName: "native object: all objects in root namespace",
- resourceName: native.ObjectPrefix + "//*",
- },
- {
- testName: "native object: all objects in namespace/container",
- resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/*",
- },
- {
- testName: "native object: all objects in root namespace/container",
- resourceName: native.ObjectPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/*",
- },
- {
- testName: "native object: object in namespace/container",
- resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/BCGsUu6o92oG1UALVox1sV6YbBUKUL2xSCtAFkrsuvWY",
- },
- {
- testName: "native object: object in root namespace/container",
- resourceName: native.ObjectPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/BCGsUu6o92oG1UALVox1sV6YbBUKUL2xSCtAFkrsuvWY",
- },
- {
- testName: "native object: invalid all objects",
- resourceName: native.ObjectPrefix + "/*12313",
- expectErr: ErrInvalidResource,
- },
- {
- testName: "native object: all objects in invalid namespace",
- resourceName: native.ObjectPrefix + "/qwe_123123/*",
- expectErr: ErrInvalidNamespace,
- },
- {
- testName: "native object: invalid all objects in root namespace",
- resourceName: native.ObjectPrefix + "//qwe",
- expectErr: ErrInvalidResource,
- },
- {
- testName: "native object: invalid cid in all objects in root namespace",
- resourceName: native.ObjectPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytHqwe/*",
- expectErr: ErrInvalidContainerID,
- },
- {
- testName: "native object: invalid cid in all objects in namespace",
- resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytHqwe/*",
- expectErr: ErrInvalidContainerID,
- },
- {
- testName: "native object: invalid object in namespace/container",
- resourceName: native.ObjectPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH/BCGsUu6o92oG1UALVox1sV6YbBUKUL2xSCtAFkrsuvWY111",
- expectErr: ErrInvalidObjectID,
- },
- {
- testName: "native object: invalid resource",
- resourceName: native.ObjectPrefix + "/ns/SeHNpifD/AFkrsuvWY111/AFkrsuvWY222",
- expectErr: ErrInvalidResource,
- },
- {
- testName: "native container: all containers",
- resourceName: native.ContainerPrefix + "/*",
- },
- {
- testName: "native container: all containers in namespace",
- resourceName: native.ContainerPrefix + "/ns/*",
- },
- {
- testName: "native container: all containers in root namespace",
- resourceName: native.ContainerPrefix + "//*",
- },
- {
- testName: "native container: container in namespace",
- resourceName: native.ContainerPrefix + "/ns/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH",
- },
- {
- testName: "native container: container in root namespace",
- resourceName: native.ContainerPrefix + "//SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH",
- },
- {
- testName: "native container: invalid all containers",
- resourceName: native.ContainerPrefix + "/*asd",
- expectErr: ErrInvalidResource,
- },
- {
- testName: "native container: invalid resource",
- resourceName: native.ContainerPrefix + "/ns/cid/cid",
- expectErr: ErrInvalidResource,
- },
- {
- testName: "native container: invalid container in root namespace",
- resourceName: native.ContainerPrefix + "//*asd",
- expectErr: ErrInvalidContainerID,
- },
- {
- testName: "native container: container in invalid namespace",
- resourceName: native.ContainerPrefix + "/ns_111/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH",
- expectErr: ErrInvalidNamespace,
- },
- {
- testName: "unsupported prefix",
- resourceName: "native:test/ns_111/SeHNpifDH2Fc4scNBphrbmrKi96QXj2HzYJkhSGuytH",
- expectErr: ErrUnsupportedPrefix,
- },
- }
-
- for _, test := range tests {
- t.Run(test.testName, func(t *testing.T) {
- err := ValidateResourceName(test.resourceName)
- require.ErrorIs(t, err, test.expectErr)
- })
- }
-}
diff --git a/pkg/services/control/server/convert.go b/pkg/services/control/server/convert.go
index 61d7e41c1..1d29ed406 100644
--- a/pkg/services/control/server/convert.go
+++ b/pkg/services/control/server/convert.go
@@ -1,6 +1,7 @@
package control
import (
+ "fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
@@ -15,7 +16,7 @@ func stateToResponse(state *engine.EvacuationState) (*control.GetShardEvacuation
for _, shID := range state.ShardIDs() {
id, err := base58.Decode(shID)
if err != nil {
- return nil, status.Error(codes.Internal, "invalid shard id format: "+shID)
+ return nil, status.Error(codes.Internal, fmt.Sprintf("invalid shard id format: %s", shID))
}
shardIDs = append(shardIDs, id)
}
@@ -46,18 +47,14 @@ func stateToResponse(state *engine.EvacuationState) (*control.GetShardEvacuation
}
return &control.GetShardEvacuationStatusResponse{
Body: &control.GetShardEvacuationStatusResponse_Body{
- Shard_ID: shardIDs,
- EvacuatedObjects: state.ObjectsEvacuated(),
- TotalObjects: state.ObjectsTotal(),
- FailedObjects: state.ObjectsFailed(),
- Status: evacStatus,
- StartedAt: startedAt,
- Duration: duration,
- ErrorMessage: state.ErrorMessage(),
- SkippedObjects: state.ObjectsSkipped(),
- TotalTrees: state.TreesTotal(),
- EvacuatedTrees: state.TreesEvacuated(),
- FailedTrees: state.TreesFailed(),
+ Shard_ID: shardIDs,
+ Evacuated: state.Evacuated(),
+ Total: state.Total(),
+ Failed: state.Failed(),
+ Status: evacStatus,
+ StartedAt: startedAt,
+ Duration: duration,
+ ErrorMessage: state.ErrorMessage(),
},
}, nil
}
diff --git a/pkg/services/control/server/ctrlmessage/sign.go b/pkg/services/control/server/ctrlmessage/sign.go
deleted file mode 100644
index d9d5c5f5e..000000000
--- a/pkg/services/control/server/ctrlmessage/sign.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package ctrlmessage
-
-import (
- "crypto/ecdsa"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
- frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
-)
-
-type SignedMessage interface {
- ReadSignedData([]byte) ([]byte, error)
- GetSignature() *control.Signature
- SetSignature(*control.Signature)
-}
-
-// Sign signs Control service ctrlmessage with private key.
-func Sign(key *ecdsa.PrivateKey, msg SignedMessage) error {
- binBody, err := msg.ReadSignedData(nil)
- if err != nil {
- return fmt.Errorf("marshal request body: %w", err)
- }
-
- var sig frostfscrypto.Signature
-
- err = sig.Calculate(frostfsecdsa.Signer(*key), binBody)
- if err != nil {
- return fmt.Errorf("calculate signature: %w", err)
- }
-
- // TODO(@cthulhu-rider): #468 use Signature ctrlmessage from FrostFS API to avoid conversion
- var sigV2 refs.Signature
- sig.WriteToV2(&sigV2)
-
- var sigControl control.Signature
- sigControl.SetKey(sigV2.GetKey())
- sigControl.SetSign(sigV2.GetSign())
-
- msg.SetSignature(&sigControl)
-
- return nil
-}
diff --git a/pkg/services/control/server/detach_shards.go b/pkg/services/control/server/detach_shards.go
deleted file mode 100644
index ffd36962b..000000000
--- a/pkg/services/control/server/detach_shards.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package control
-
-import (
- "context"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- shardIDs := s.getShardIDList(req.GetBody().GetShard_ID())
-
- if err := s.s.DetachShards(ctx, shardIDs); err != nil {
- if errors.As(err, new(logicerr.Logical)) {
- return nil, status.Error(codes.InvalidArgument, err.Error())
- }
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := &control.DetachShardsResponse{
- Body: &control.DetachShardsResponse_Body{},
- }
-
- if err = ctrlmessage.Sign(s.key, resp); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- return resp, nil
-}
diff --git a/pkg/services/control/server/doctor.go b/pkg/services/control/server/doctor.go
index 80041de44..2c91d4c2b 100644
--- a/pkg/services/control/server/doctor.go
+++ b/pkg/services/control/server/doctor.go
@@ -5,7 +5,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -16,12 +15,12 @@ func (s *Server) Doctor(ctx context.Context, req *control.DoctorRequest) (*contr
return nil, status.Error(codes.PermissionDenied, err.Error())
}
- if !req.GetBody().GetRemoveDuplicates() {
+ if !req.Body.RemoveDuplicates {
return nil, status.Error(codes.InvalidArgument, "operation not specified")
}
var prm engine.RemoveDuplicatesPrm
- prm.Concurrency = int(req.GetBody().GetConcurrency())
+ prm.Concurrency = int(req.Body.Concurrency)
err = s.s.RemoveDuplicates(ctx, prm)
if err != nil {
@@ -30,7 +29,7 @@ func (s *Server) Doctor(ctx context.Context, req *control.DoctorRequest) (*contr
resp := &control.DoctorResponse{Body: &control.DoctorResponse_Body{}}
- err = ctrlmessage.Sign(s.key, resp)
+ err = SignMessage(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
new file mode 100644
index 000000000..8f62c3489
--- /dev/null
+++ b/pkg/services/control/server/evacuate.go
@@ -0,0 +1,108 @@
+package control
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ var prm engine.EvacuateShardPrm
+ prm.WithShardIDList(s.getShardIDList(req.GetBody().GetShard_ID()))
+ prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
+ prm.WithFaultHandler(s.replicate)
+
+ res, err := s.s.Evacuate(ctx, prm)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.EvacuateShardResponse{
+ Body: &control.EvacuateShardResponse_Body{
+ Count: uint32(res.Evacuated()),
+ },
+ }
+
+ err = SignMessage(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
+
+func (s *Server) replicate(ctx context.Context, addr oid.Address, obj *objectSDK.Object) error {
+ cid, ok := obj.ContainerID()
+ if !ok {
+ // Return nil to prevent situations where a shard can't be evacuated
+ // because of a single bad/corrupted object.
+ return nil
+ }
+
+ nm, err := s.netMapSrc.GetNetMap(0)
+ if err != nil {
+ return err
+ }
+
+ c, err := s.cnrSrc.Get(cid)
+ if err != nil {
+ return err
+ }
+
+ binCnr := make([]byte, sha256.Size)
+ cid.Encode(binCnr)
+
+ ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
+ if err != nil {
+ return fmt.Errorf("can't build a list of container nodes")
+ }
+
+ nodes := placement.FlattenNodes(ns)
+ bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
+ for i := 0; i < len(nodes); i++ {
+ if bytes.Equal(nodes[i].PublicKey(), bs) {
+ copy(nodes[i:], nodes[i+1:])
+ nodes = nodes[:len(nodes)-1]
+ }
+ }
+
+ var res replicatorResult
+ task := replicator.Task{
+ NumCopies: 1,
+ Addr: addr,
+ Obj: obj,
+ Nodes: nodes,
+ }
+ s.replicator.HandleTask(ctx, task, &res)
+
+ if res.count == 0 {
+ return errors.New("object was not replicated")
+ }
+ return nil
+}
+
+type replicatorResult struct {
+ count int
+}
+
+// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
+func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
+ r.count++
+}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index f3ba9015e..cdf3656e2 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -1,54 +1,30 @@
package control
import (
- "bytes"
"context"
- "crypto/sha256"
- "encoding/hex"
"errors"
- "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
-var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
-
func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartShardEvacuationRequest) (*control.StartShardEvacuationResponse, error) {
err := s.isValidRequest(req)
if err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
}
- if req.GetBody().GetScope() == uint32(control.StartShardEvacuationRequest_Body_NONE) {
- return nil, status.Error(codes.InvalidArgument, "no evacuation scope")
- }
+ var prm engine.EvacuateShardPrm
+ prm.WithShardIDList(s.getShardIDList(req.GetBody().GetShard_ID()))
+ prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
+ prm.WithFaultHandler(s.replicate)
+ prm.WithAsync(true)
- prm := engine.EvacuateShardPrm{
- ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
- IgnoreErrors: req.GetBody().GetIgnoreErrors(),
- ObjectsHandler: s.replicateObject,
- TreeHandler: s.replicateTree,
- Scope: engine.EvacuateScope(req.GetBody().GetScope()),
- ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(),
- ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(),
- RepOneOnly: req.GetBody().GetRepOneOnly(),
- }
-
- if err = s.s.Evacuate(ctx, prm); err != nil {
+ _, err = s.s.Evacuate(ctx, prm)
+ if err != nil {
var logicalErr logicerr.Logical
if errors.As(err, &logicalErr) {
return nil, status.Error(codes.Aborted, err.Error())
@@ -60,7 +36,7 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha
Body: &control.StartShardEvacuationResponse_Body{},
}
- err = ctrlmessage.Sign(s.key, resp)
+ err = SignMessage(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -87,7 +63,7 @@ func (s *Server) GetShardEvacuationStatus(ctx context.Context, req *control.GetS
return nil, err
}
- err = ctrlmessage.Sign(s.key, resp)
+ err = SignMessage(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -113,168 +89,9 @@ func (s *Server) StopShardEvacuation(ctx context.Context, req *control.StopShard
Body: &control.StopShardEvacuationResponse_Body{},
}
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- s.s.ResetEvacuationStatusForShards()
-
- return resp, nil
-}
-
-func (s *Server) ResetShardEvacuationStatus(ctx context.Context, req *control.ResetShardEvacuationStatusRequest) (*control.ResetShardEvacuationStatusResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- err = s.s.ResetEvacuationStatus(ctx)
- if err != nil {
- var logicalErr logicerr.Logical
- if errors.As(err, &logicalErr) {
- return nil, status.Error(codes.Aborted, err.Error())
- }
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := &control.ResetShardEvacuationStatusResponse{
- Body: &control.ResetShardEvacuationStatusResponse_Body{},
- }
-
- err = ctrlmessage.Sign(s.key, resp)
+ err = SignMessage(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return resp, nil
}
-
-func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
- cid, ok := obj.ContainerID()
- if !ok {
- // Return nil to prevent situations where a shard can't be evacuated
- // because of a single bad/corrupted object.
- return false, nil
- }
-
- nodes, err := s.getContainerNodes(ctx, cid)
- if err != nil {
- return false, err
- }
-
- if len(nodes) == 0 {
- return false, nil
- }
-
- var res replicatorResult
- task := replicator.Task{
- NumCopies: 1,
- Addr: addr,
- Obj: obj,
- Nodes: nodes,
- }
- s.replicator.HandleReplicationTask(ctx, task, &res)
-
- if res.count == 0 {
- return false, errors.New("object was not replicated")
- }
- return true, nil
-}
-
-func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
- nodes, err := s.getContainerNodes(ctx, contID)
- if err != nil {
- return false, "", err
- }
- if len(nodes) == 0 {
- return false, "", nil
- }
-
- for _, node := range nodes {
- err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
- if err == nil {
- return true, hex.EncodeToString(node.PublicKey()), nil
- }
- }
- return false, "", err
-}
-
-func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
- rawCID := make([]byte, sha256.Size)
- contID.Encode(rawCID)
-
- var height uint64
- for {
- op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
- if err != nil {
- return err
- }
-
- if op.Time == 0 {
- return nil
- }
-
- req := &tree.ApplyRequest{
- Body: &tree.ApplyRequest_Body{
- ContainerId: rawCID,
- TreeId: treeID,
- Operation: &tree.LogMove{
- ParentId: op.Parent,
- Meta: op.Bytes(),
- ChildId: op.Child,
- },
- },
- }
-
- err = tree.SignMessage(req, s.key)
- if err != nil {
- return fmt.Errorf("can't message apply request: %w", err)
- }
-
- err = s.treeService.ReplicateTreeOp(ctx, node, req)
- if err != nil {
- return err
- }
-
- height = op.Time + 1
- }
-}
-
-func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) ([]netmap.NodeInfo, error) {
- nm, err := s.netMapSrc.GetNetMap(ctx, 0)
- if err != nil {
- return nil, err
- }
-
- c, err := s.cnrSrc.Get(ctx, contID)
- if err != nil {
- return nil, err
- }
-
- binCnr := make([]byte, sha256.Size)
- contID.Encode(binCnr)
-
- ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
- if err != nil {
- return nil, errFailedToBuildListOfContainerNodes
- }
-
- nodes := placement.FlattenNodes(ns)
- bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
- for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
- if bytes.Equal(nodes[i].PublicKey(), bs) {
- copy(nodes[i:], nodes[i+1:])
- nodes = nodes[:len(nodes)-1]
- }
- }
- return nodes, nil
-}
-
-type replicatorResult struct {
- count int
-}
-
-// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
-func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
- r.count++
-}
diff --git a/pkg/services/control/server/flush_cache.go b/pkg/services/control/server/flush_cache.go
index 031002d71..9ead530db 100644
--- a/pkg/services/control/server/flush_cache.go
+++ b/pkg/services/control/server/flush_cache.go
@@ -5,7 +5,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -19,7 +18,6 @@ func (s *Server) FlushCache(ctx context.Context, req *control.FlushCacheRequest)
for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
var prm engine.FlushWriteCachePrm
prm.SetShardID(shardID)
- prm.SetSeal(req.GetBody().GetSeal())
_, err = s.s.FlushWriteCache(ctx, prm)
if err != nil {
@@ -29,7 +27,7 @@ func (s *Server) FlushCache(ctx context.Context, req *control.FlushCacheRequest)
resp := &control.FlushCacheResponse{Body: &control.FlushCacheResponse_Body{}}
- err = ctrlmessage.Sign(s.key, resp)
+ err = SignMessage(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go
index a8ef7809e..d382dd7e5 100644
--- a/pkg/services/control/server/gc.go
+++ b/pkg/services/control/server/gc.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -42,7 +41,8 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques
prm.WithForceRemoval()
prm.WithAddress(addrList[i])
- if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil {
+ _, err := s.s.Delete(ctx, prm)
+ if err != nil && firstErr == nil {
firstErr = err
}
}
@@ -58,7 +58,7 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques
resp.SetBody(body)
// sign the response
- if err := ctrlmessage.Sign(s.key, resp); err != nil {
+ if err := SignMessage(s.key, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go
deleted file mode 100644
index 5e0496910..000000000
--- a/pkg/services/control/server/get_netmap_status.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package control
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// GetNetmapStatus gets node status in FrostFS network.
-func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- st, epoch, err := s.nodeState.GetNetmapStatus(ctx)
- if err != nil {
- return nil, err
- }
-
- resp := &control.GetNetmapStatusResponse{
- Body: &control.GetNetmapStatusResponse_Body{
- Status: st,
- Epoch: epoch,
- },
- }
-
- if err := ctrlmessage.Sign(s.key, resp); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- return resp, nil
-}
diff --git a/pkg/services/control/server/healthcheck.go b/pkg/services/control/server/healthcheck.go
index 121c51280..9e87caa49 100644
--- a/pkg/services/control/server/healthcheck.go
+++ b/pkg/services/control/server/healthcheck.go
@@ -4,7 +4,6 @@ import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -28,7 +27,7 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest)
body.SetHealthStatus(s.healthChecker.HealthStatus())
// sign the response
- if err := ctrlmessage.Sign(s.key, resp); err != nil {
+ if err := SignMessage(s.key, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/list_shards.go b/pkg/services/control/server/list_shards.go
index efe2754ea..a020547a2 100644
--- a/pkg/services/control/server/list_shards.go
+++ b/pkg/services/control/server/list_shards.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -25,15 +24,15 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
info := s.s.DumpInfo()
- shardInfos := make([]control.ShardInfo, 0, len(info.Shards))
+ shardInfos := make([]*control.ShardInfo, 0, len(info.Shards))
for _, sh := range info.Shards {
si := new(control.ShardInfo)
- si.SetShard_ID(*sh.ID)
+ si.SetID(*sh.ID)
si.SetMetabasePath(sh.MetaBaseInfo.Path)
si.Blobstor = blobstorInfoToProto(sh.BlobStorInfo)
- si.SetWritecachePath(sh.WriteCacheInfo.Path)
+ si.SetWriteCachePath(sh.WriteCacheInfo.Path)
si.SetPiloramaPath(sh.PiloramaInfo.Path)
var m control.ShardMode
@@ -53,25 +52,24 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
si.SetMode(m)
si.SetErrorCount(sh.ErrorCount)
- si.SetEvacuationInProgress(sh.EvacuationInProgress)
- shardInfos = append(shardInfos, *si)
+ shardInfos = append(shardInfos, si)
}
body.SetShards(shardInfos)
// sign the response
- if err := ctrlmessage.Sign(s.key, resp); err != nil {
+ if err := SignMessage(s.key, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return resp, nil
}
-func blobstorInfoToProto(info blobstor.Info) []control.BlobstorInfo {
- res := make([]control.BlobstorInfo, len(info.SubStorages))
+func blobstorInfoToProto(info blobstor.Info) []*control.BlobstorInfo {
+ res := make([]*control.BlobstorInfo, len(info.SubStorages))
for i := range info.SubStorages {
- res[i] = control.BlobstorInfo{
+ res[i] = &control.BlobstorInfo{
Path: info.SubStorages[i].Path,
Type: info.SubStorages[i].Type,
}
diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go
deleted file mode 100644
index 39565ed50..000000000
--- a/pkg/services/control/server/list_shards_for_object.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package control
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- var obj oid.ID
- err = obj.DecodeString(req.GetBody().GetObjectId())
- if err != nil {
- return nil, status.Error(codes.InvalidArgument, err.Error())
- }
-
- var cnr cid.ID
- err = cnr.DecodeString(req.GetBody().GetContainerId())
- if err != nil {
- return nil, status.Error(codes.InvalidArgument, err.Error())
- }
-
- resp := new(control.ListShardsForObjectResponse)
- body := new(control.ListShardsForObjectResponse_Body)
- resp.SetBody(body)
-
- var objAddr oid.Address
- objAddr.SetContainer(cnr)
- objAddr.SetObject(obj)
- info, err := s.s.ListShardsForObject(ctx, objAddr)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- if len(info) == 0 {
- return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject)
- }
-
- body.SetShard_ID(shardInfoToProto(info))
-
- // Sign the response
- if err := ctrlmessage.Sign(s.key, resp); err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func shardInfoToProto(infos []shard.Info) [][]byte {
- shardInfos := make([][]byte, 0, len(infos))
- for _, info := range infos {
- shardInfos = append(shardInfos, *info.ID)
- }
-
- return shardInfos
-}
diff --git a/pkg/services/control/server/policy_engine.go b/pkg/services/control/server/policy_engine.go
deleted file mode 100644
index ab8258e27..000000000
--- a/pkg/services/control/server/policy_engine.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package control
-
-import (
- "context"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func apeTarget(chainTarget *control.ChainTarget) (engine.Target, error) {
- switch chainTarget.GetType() {
- case control.ChainTarget_CONTAINER:
- return engine.ContainerTarget(chainTarget.GetName()), nil
- case control.ChainTarget_NAMESPACE:
- return engine.NamespaceTarget(chainTarget.GetName()), nil
- case control.ChainTarget_USER:
- return engine.UserTarget(chainTarget.GetName()), nil
- case control.ChainTarget_GROUP:
- return engine.GroupTarget(chainTarget.GetName()), nil
- default:
- }
- return engine.Target{}, status.Error(codes.InvalidArgument,
- fmt.Errorf("target type is not supported: %s", chainTarget.GetType().String()).Error())
-}
-
-func controlTarget(chainTarget *engine.Target) (control.ChainTarget, error) {
- switch chainTarget.Type {
- case engine.Container:
- return control.ChainTarget{
- Name: chainTarget.Name,
- Type: control.ChainTarget_CONTAINER,
- }, nil
- case engine.Namespace:
- // If namespace is empty, we take it for root namespace.
- nm := chainTarget.Name
- if nm == "root" {
- nm = ""
- }
- return control.ChainTarget{
- Name: nm,
- Type: control.ChainTarget_NAMESPACE,
- }, nil
- case engine.User:
- return control.ChainTarget{
- Name: chainTarget.Name,
- Type: control.ChainTarget_USER,
- }, nil
- case engine.Group:
- return control.ChainTarget{
- Name: chainTarget.Name,
- Type: control.ChainTarget_GROUP,
- }, nil
- default:
- }
- return control.ChainTarget{}, status.Error(codes.InvalidArgument,
- fmt.Errorf("target type is not supported: %c", chainTarget.Type).Error())
-}
-
-func (s *Server) AddChainLocalOverride(_ context.Context, req *control.AddChainLocalOverrideRequest) (*control.AddChainLocalOverrideResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- var chain apechain.Chain
- if err := chain.DecodeBytes(req.GetBody().GetChain()); err != nil {
- return nil, status.Error(codes.InvalidArgument, err.Error())
- }
- for _, rule := range chain.Rules {
- for _, name := range rule.Resources.Names {
- if err := ape.ValidateResourceName(name); err != nil {
- return nil, status.Error(codes.InvalidArgument, fmt.Errorf("invalid resource: %w", err).Error())
- }
- }
- }
-
- s.apeChainCounter.Add(1)
- // TODO (aarifullin): the such chain id is not well-designed yet.
- if len(chain.ID) == 0 {
- chain.ID = apechain.ID(fmt.Sprintf("%s:%d", apechain.Ingress, s.apeChainCounter.Load()))
- }
-
- target, err := apeTarget(req.GetBody().GetTarget())
- if err != nil {
- return nil, err
- }
-
- if _, err = s.localOverrideStorage.LocalStorage().AddOverride(apechain.Ingress, target, &chain); err != nil {
- return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
- }
-
- resp := &control.AddChainLocalOverrideResponse{
- Body: &control.AddChainLocalOverrideResponse_Body{
- ChainId: []byte(chain.ID),
- },
- }
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func (s *Server) GetChainLocalOverride(_ context.Context, req *control.GetChainLocalOverrideRequest) (*control.GetChainLocalOverrideResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- target, err := apeTarget(req.GetBody().GetTarget())
- if err != nil {
- return nil, err
- }
- chain, err := s.localOverrideStorage.LocalStorage().GetOverride(apechain.Ingress, target, apechain.ID(req.GetBody().GetChainId()))
- if err != nil {
- return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
- }
-
- resp := &control.GetChainLocalOverrideResponse{
- Body: &control.GetChainLocalOverrideResponse_Body{
- Chain: chain.Bytes(),
- },
- }
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func (s *Server) ListChainLocalOverrides(_ context.Context, req *control.ListChainLocalOverridesRequest) (*control.ListChainLocalOverridesResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- target, err := apeTarget(req.GetBody().GetTarget())
- if err != nil {
- return nil, err
- }
-
- chains, err := s.localOverrideStorage.LocalStorage().ListOverrides(apechain.Ingress, target)
- if err != nil {
- return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
- }
- serializedChains := make([][]byte, 0, len(chains))
- for _, chain := range chains {
- serializedChains = append(serializedChains, chain.Bytes())
- }
-
- resp := &control.ListChainLocalOverridesResponse{
- Body: &control.ListChainLocalOverridesResponse_Body{
- Chains: serializedChains,
- },
- }
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func (s *Server) RemoveChainLocalOverride(_ context.Context, req *control.RemoveChainLocalOverrideRequest) (*control.RemoveChainLocalOverrideResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- target, err := apeTarget(req.GetBody().GetTarget())
- if err != nil {
- return nil, err
- }
-
- if err = s.localOverrideStorage.LocalStorage().RemoveOverride(apechain.Ingress, target, req.GetBody().GetChainId()); err != nil {
- return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
- }
- resp := &control.RemoveChainLocalOverrideResponse{
- Body: &control.RemoveChainLocalOverrideResponse_Body{},
- }
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func (s *Server) RemoveChainLocalOverridesByTarget(_ context.Context, req *control.RemoveChainLocalOverridesByTargetRequest) (*control.RemoveChainLocalOverridesByTargetResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- target, err := apeTarget(req.GetBody().GetTarget())
- if err != nil {
- return nil, err
- }
-
- if err = s.localOverrideStorage.LocalStorage().RemoveOverridesByTarget(apechain.Ingress, target); err != nil {
- return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
- }
- resp := &control.RemoveChainLocalOverridesByTargetResponse{
- Body: &control.RemoveChainLocalOverridesByTargetResponse_Body{},
- }
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func (s *Server) ListTargetsLocalOverrides(_ context.Context, req *control.ListTargetsLocalOverridesRequest) (*control.ListTargetsLocalOverridesResponse, error) {
- if err := s.isValidRequest(req); err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- apeChainName := apechain.Name(req.GetBody().GetChainName())
- apeTargets, err := s.localOverrideStorage.LocalStorage().ListOverrideDefinedTargets(apeChainName)
- if err != nil {
- return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
- }
- targets := make([]control.ChainTarget, 0, len(apeTargets))
- for i := range apeTargets {
- target, err := controlTarget(&apeTargets[i])
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- targets = append(targets, target)
- }
-
- resp := &control.ListTargetsLocalOverridesResponse{
- Body: &control.ListTargetsLocalOverridesResponse_Body{
- Targets: targets,
- },
- }
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
-
-func getCodeByLocalStorageErr(err error) codes.Code {
- if errors.Is(err, engine.ErrChainNotFound) || errors.Is(err, engine.ErrChainNameNotFound) ||
- errors.Is(err, engine.ErrResourceNotFound) {
- return codes.NotFound
- }
- return codes.Internal
-}
diff --git a/pkg/services/control/server/rebuild.go b/pkg/services/control/server/rebuild.go
deleted file mode 100644
index 6ddfb8bf4..000000000
--- a/pkg/services/control/server/rebuild.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package control
-
-import (
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) StartShardRebuild(ctx context.Context, req *control.StartShardRebuildRequest) (*control.StartShardRebuildResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- if req.GetBody().GetConcurrencyLimit() == 0 || req.GetBody().GetConcurrencyLimit() > 10000 {
- return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("concurrency limit must be in range (0; 10 000], current value %d", req.GetBody().GetConcurrencyLimit()))
- }
-
- if req.GetBody().GetTargetFillPercent() == 0 || req.GetBody().GetTargetFillPercent() > 100 {
- return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("fill percent must be in range (0; 100], current value %d", req.GetBody().GetTargetFillPercent()))
- }
-
- prm := engine.RebuildPrm{
- ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()),
- ConcurrencyLimit: req.GetBody().GetConcurrencyLimit(),
- TargetFillPercent: req.GetBody().GetTargetFillPercent(),
- }
-
- res, err := s.s.Rebuild(ctx, prm)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := &control.StartShardRebuildResponse{Body: &control.StartShardRebuildResponse_Body{}}
- for _, r := range res.ShardResults {
- if r.Success {
- resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{
- Shard_ID: *r.ShardID,
- Success: true,
- })
- } else {
- resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{
- Shard_ID: *r.ShardID,
- Error: r.ErrorMsg,
- })
- }
- }
-
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
diff --git a/pkg/services/control/server/seal_writecache.go b/pkg/services/control/server/seal_writecache.go
deleted file mode 100644
index 6799bdcac..000000000
--- a/pkg/services/control/server/seal_writecache.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package control
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) SealWriteCache(ctx context.Context, req *control.SealWriteCacheRequest) (*control.SealWriteCacheResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- prm := engine.SealWriteCachePrm{
- ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()),
- IgnoreErrors: req.GetBody().GetIgnoreErrors(),
- Async: req.GetBody().GetAsync(),
- RestoreMode: req.GetBody().GetRestoreMode(),
- Shrink: req.GetBody().GetShrink(),
- }
-
- res, err := s.s.SealWriteCache(ctx, prm)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := &control.SealWriteCacheResponse{Body: &control.SealWriteCacheResponse_Body{}}
- for _, r := range res.ShardResults {
- if r.Success {
- resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{
- Shard_ID: *r.ShardID,
- Success: true,
- })
- } else {
- resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{
- Shard_ID: *r.ShardID,
- Error: r.ErrorMsg,
- })
- }
- }
-
- err = ctrlmessage.Sign(s.key, resp)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go
index 59d701bc6..a0ad44e23 100644
--- a/pkg/services/control/server/server.go
+++ b/pkg/services/control/server/server.go
@@ -1,39 +1,31 @@
package control
import (
- "context"
"crypto/ecdsa"
- "sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
)
// Server is an entity that serves
// Control service on storage node.
type Server struct {
*cfg
-
- // TODO (aarifullin): this counter is used to assign id for rule chains
- // added as local overrides and will be removed as soon as in-memory
- // implementation will be replaced.
- apeChainCounter atomic.Uint32
}
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // NetmapStatus must calculate and return current status of the node in FrostFS network map.
+ // Must calculate and return current status of the node in FrostFS network map.
//
// If status can not be calculated for any reason,
// control.netmapStatus_STATUS_UNDEFINED should be returned.
NetmapStatus() control.NetmapStatus
- // HealthStatus must calculate and return current health status of the node application.
+ // Must calculate and return current health status of the node application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
@@ -46,21 +38,11 @@ type NodeState interface {
//
// If status is control.NetmapStatus_MAINTENANCE and maintenance is allowed
// in the network settings, the node additionally starts local maintenance.
- SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error
+ SetNetmapStatus(st control.NetmapStatus) error
// ForceMaintenance works like SetNetmapStatus(control.NetmapStatus_MAINTENANCE)
// but starts local maintenance regardless of the network settings.
- ForceMaintenance(ctx context.Context) error
-
- GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error)
-}
-
-// LocalOverrideStorageDecorator interface provides methods to decorate LocalOverrideEngine
-// interface methods.
-type LocalOverrideStorageDecorator interface {
- // LocalStorage method can be decorated by using sync primitives in the case if the local
- // override storage state should be consistent for chain router.
- LocalStorage() policyengine.LocalOverrideStorage
+ ForceMaintenance() error
}
// Option of the Server's constructor.
@@ -77,8 +59,6 @@ type cfg struct {
cnrSrc container.Source
- localOverrideStorage LocalOverrideStorageDecorator
-
replicator *replicator.Replicator
nodeState NodeState
@@ -171,11 +151,3 @@ func WithTreeService(s TreeService) Option {
c.treeService = s
}
}
-
-// WithLocalOverrideStorage returns the option to set access policy engine
-// chain override storage.
-func WithLocalOverrideStorage(localOverrideStorage LocalOverrideStorageDecorator) Option {
- return func(c *cfg) {
- c.localOverrideStorage = localOverrideStorage
- }
-}
diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go
index 529041dca..d4a856952 100644
--- a/pkg/services/control/server/set_netmap_status.go
+++ b/pkg/services/control/server/set_netmap_status.go
@@ -4,7 +4,6 @@ import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -12,7 +11,7 @@ import (
// SetNetmapStatus sets node status in FrostFS network.
//
// If request is unsigned or signed by disallowed key, permission error returns.
-func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
+func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
// verify request
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -29,9 +28,9 @@ func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStat
"force_maintenance MUST be set for %s status only", control.NetmapStatus_MAINTENANCE)
}
- err = s.nodeState.ForceMaintenance(ctx)
+ err = s.nodeState.ForceMaintenance()
} else {
- err = s.nodeState.SetNetmapStatus(ctx, st)
+ err = s.nodeState.SetNetmapStatus(st)
}
if err != nil {
@@ -45,7 +44,7 @@ func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStat
resp.SetBody(body)
// sign the response
- if err := ctrlmessage.Sign(s.key, resp); err != nil {
+ if err := SignMessage(s.key, resp); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/set_shard_mode.go b/pkg/services/control/server/set_shard_mode.go
index 4f8796263..274e2f65d 100644
--- a/pkg/services/control/server/set_shard_mode.go
+++ b/pkg/services/control/server/set_shard_mode.go
@@ -6,12 +6,11 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
-func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
+func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) {
// verify request
err := s.isValidRequest(req)
if err != nil {
@@ -37,8 +36,8 @@ func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequ
return nil, status.Error(codes.Internal, fmt.Sprintf("unknown shard mode: %s", requestedMode))
}
- for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
- err = s.s.SetShardMode(ctx, shardID, m, req.GetBody().GetResetErrorCounter())
+ for _, shardID := range s.getShardIDList(req.Body.GetShard_ID()) {
+ err = s.s.SetShardMode(shardID, m, req.Body.GetResetErrorCounter())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -51,7 +50,7 @@ func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequ
resp.SetBody(body)
// sign the response
- err = ctrlmessage.Sign(s.key, resp)
+ err = SignMessage(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/sign.go b/pkg/services/control/server/sign.go
index 0e8e24b6e..acc405821 100644
--- a/pkg/services/control/server/sign.go
+++ b/pkg/services/control/server/sign.go
@@ -2,17 +2,26 @@ package control
import (
"bytes"
+ "crypto/ecdsa"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
+ frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
)
+// SignedMessage is an interface of Control service message.
+type SignedMessage interface {
+ ReadSignedData([]byte) ([]byte, error)
+ GetSignature() *control.Signature
+ SetSignature(*control.Signature)
+}
+
var errDisallowedKey = errors.New("key is not in the allowed list")
-func (s *Server) isValidRequest(req ctrlmessage.SignedMessage) error {
+func (s *Server) isValidRequest(req SignedMessage) error {
sign := req.GetSignature()
if sign == nil {
// TODO(@cthulhu-rider): #468 use "const" error
@@ -59,3 +68,30 @@ func (s *Server) isValidRequest(req ctrlmessage.SignedMessage) error {
return nil
}
+
+// SignMessage signs Control service message with private key.
+func SignMessage(key *ecdsa.PrivateKey, msg SignedMessage) error {
+ binBody, err := msg.ReadSignedData(nil)
+ if err != nil {
+ return fmt.Errorf("marshal request body: %w", err)
+ }
+
+ var sig frostfscrypto.Signature
+
+ err = sig.Calculate(frostfsecdsa.Signer(*key), binBody)
+ if err != nil {
+ return fmt.Errorf("calculate signature: %w", err)
+ }
+
+ // TODO(@cthulhu-rider): #468 use Signature message from FrostFS API to avoid conversion
+ var sigV2 refs.Signature
+ sig.WriteToV2(&sigV2)
+
+ var sigControl control.Signature
+ sigControl.SetKey(sigV2.GetKey())
+ sigControl.SetSign(sigV2.GetSign())
+
+ msg.SetSignature(&sigControl)
+
+ return nil
+}
diff --git a/pkg/services/control/server/syncronize_tree.go b/pkg/services/control/server/syncronize_tree.go
index b2a966b2c..dce3e8831 100644
--- a/pkg/services/control/server/syncronize_tree.go
+++ b/pkg/services/control/server/syncronize_tree.go
@@ -4,18 +4,14 @@ import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TreeService represents a tree service instance.
type TreeService interface {
- SynchronizeTree(ctx context.Context, cnr cid.ID, treeID string) error
- ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req *tree.ApplyRequest) error
+ Synchronize(ctx context.Context, cnr cid.ID, treeID string) error
}
func (s *Server) SynchronizeTree(ctx context.Context, req *control.SynchronizeTreeRequest) (*control.SynchronizeTreeResponse, error) {
@@ -35,7 +31,7 @@ func (s *Server) SynchronizeTree(ctx context.Context, req *control.SynchronizeTr
return nil, status.Error(codes.Internal, err.Error())
}
- err = s.treeService.SynchronizeTree(ctx, cnr, b.GetTreeId())
+ err = s.treeService.Synchronize(ctx, cnr, b.GetTreeId())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -43,7 +39,7 @@ func (s *Server) SynchronizeTree(ctx context.Context, req *control.SynchronizeTr
resp := new(control.SynchronizeTreeResponse)
resp.SetBody(new(control.SynchronizeTreeResponse_Body))
- err = ctrlmessage.Sign(s.key, resp)
+ err = SignMessage(s.key, resp)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/service.go b/pkg/services/control/service.go
new file mode 100644
index 000000000..ef0c0a8d2
--- /dev/null
+++ b/pkg/services/control/service.go
@@ -0,0 +1,142 @@
+package control
+
+// SetBody sets health check request body.
+func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetNetmapStatus sets status of the storage node in FrostFS network map.
+func (x *HealthCheckResponse_Body) SetNetmapStatus(v NetmapStatus) {
+ if x != nil {
+ x.NetmapStatus = v
+ }
+}
+
+// SetHealthStatus sets health status of the storage node application.
+func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
+ if x != nil {
+ x.HealthStatus = v
+ }
+}
+
+// SetBody sets health check response body.
+func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetStatus sets new storage node status in FrostFS network map.
+func (x *SetNetmapStatusRequest_Body) SetStatus(v NetmapStatus) {
+ if x != nil {
+ x.Status = v
+ }
+}
+
+// SetForceMaintenance sets force_maintenance flag in the message.
+func (x *SetNetmapStatusRequest_Body) SetForceMaintenance() {
+ x.ForceMaintenance = true
+}
+
+// SetBody sets body of the set netmap status request.
+func (x *SetNetmapStatusRequest) SetBody(v *SetNetmapStatusRequest_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetBody sets body of the set netmap status response.
+func (x *SetNetmapStatusResponse) SetBody(v *SetNetmapStatusResponse_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetAddressList sets list of objects to be removed in FrostFS API binary format.
+func (x *DropObjectsRequest_Body) SetAddressList(v [][]byte) {
+ if x != nil {
+ x.AddressList = v
+ }
+}
+
+// SetBody sets body of the "Drop objects" request.
+func (x *DropObjectsRequest) SetBody(v *DropObjectsRequest_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetBody sets body of the "Drop objects" response.
+func (x *DropObjectsResponse) SetBody(v *DropObjectsResponse_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetBody sets list shards request body.
+func (x *ListShardsRequest) SetBody(v *ListShardsRequest_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetShards sets shards of the storage node.
+func (x *ListShardsResponse_Body) SetShards(v []*ShardInfo) {
+ if x != nil {
+ x.Shards = v
+ }
+}
+
+// SetBody sets list shards response body.
+func (x *ListShardsResponse) SetBody(v *ListShardsResponse_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetShardIDList sets the list of shard IDs whose mode is requested to be set.
+func (x *SetShardModeRequest_Body) SetShardIDList(v [][]byte) {
+	// Guard on the receiver (not the argument) so a nil *Body is a no-op,
+	// matching every other setter in this file; the original checked v != nil,
+	// which panics on a nil receiver and silently drops an explicit nil list.
+	if x != nil {
+		x.Shard_ID = v
+	}
+}
+
+// SetMode sets mode of the shard.
+func (x *SetShardModeRequest_Body) SetMode(v ShardMode) {
+ x.Mode = v
+}
+
+// ClearErrorCounter sets flag signifying whether error counter for shard should be cleared.
+func (x *SetShardModeRequest_Body) ClearErrorCounter(reset bool) {
+ x.ResetErrorCounter = reset
+}
+
+// SetBody sets request body.
+func (x *SetShardModeRequest) SetBody(v *SetShardModeRequest_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetBody sets body of the set shard mode response.
+func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetBody sets synchronize tree request body.
+func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
+
+// SetBody sets synchronize tree response body.
+func (x *SynchronizeTreeResponse) SetBody(v *SynchronizeTreeResponse_Body) {
+ if x != nil {
+ x.Body = v
+ }
+}
diff --git a/pkg/services/control/service.pb.go b/pkg/services/control/service.pb.go
new file mode 100644
index 000000000..b1bebb1e2
--- /dev/null
+++ b/pkg/services/control/service.pb.go
@@ -0,0 +1,3899 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.9
+// source: pkg/services/control/service.proto
+
+package control
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Evacuate status enum.
+type GetShardEvacuationStatusResponse_Body_Status int32
+
+const (
+ GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED GetShardEvacuationStatusResponse_Body_Status = 0
+ GetShardEvacuationStatusResponse_Body_RUNNING GetShardEvacuationStatusResponse_Body_Status = 1
+ GetShardEvacuationStatusResponse_Body_COMPLETED GetShardEvacuationStatusResponse_Body_Status = 2
+)
+
+// Enum value maps for GetShardEvacuationStatusResponse_Body_Status.
+var (
+ GetShardEvacuationStatusResponse_Body_Status_name = map[int32]string{
+ 0: "EVACUATE_SHARD_STATUS_UNDEFINED",
+ 1: "RUNNING",
+ 2: "COMPLETED",
+ }
+ GetShardEvacuationStatusResponse_Body_Status_value = map[string]int32{
+ "EVACUATE_SHARD_STATUS_UNDEFINED": 0,
+ "RUNNING": 1,
+ "COMPLETED": 2,
+ }
+)
+
+func (x GetShardEvacuationStatusResponse_Body_Status) Enum() *GetShardEvacuationStatusResponse_Body_Status {
+ p := new(GetShardEvacuationStatusResponse_Body_Status)
+ *p = x
+ return p
+}
+
+func (x GetShardEvacuationStatusResponse_Body_Status) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GetShardEvacuationStatusResponse_Body_Status) Descriptor() protoreflect.EnumDescriptor {
+ return file_pkg_services_control_service_proto_enumTypes[0].Descriptor()
+}
+
+func (GetShardEvacuationStatusResponse_Body_Status) Type() protoreflect.EnumType {
+ return &file_pkg_services_control_service_proto_enumTypes[0]
+}
+
+func (x GetShardEvacuationStatusResponse_Body_Status) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GetShardEvacuationStatusResponse_Body_Status.Descriptor instead.
+func (GetShardEvacuationStatusResponse_Body_Status) EnumDescriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21, 0, 0}
+}
+
+// Health check request.
+type HealthCheckRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of health check request message.
+ Body *HealthCheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *HealthCheckRequest) Reset() {
+ *x = HealthCheckRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckRequest) ProtoMessage() {}
+
+func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *HealthCheckRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Health check request.
+type HealthCheckResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of health check response message.
+ Body *HealthCheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *HealthCheckResponse) Reset() {
+ *x = HealthCheckResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckResponse) ProtoMessage() {}
+
+func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *HealthCheckResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Set netmap status request.
+type SetNetmapStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of set netmap status request message.
+ Body *SetNetmapStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *SetNetmapStatusRequest) Reset() {
+ *x = SetNetmapStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetNetmapStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetNetmapStatusRequest) ProtoMessage() {}
+
+func (x *SetNetmapStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetNetmapStatusRequest.ProtoReflect.Descriptor instead.
+func (*SetNetmapStatusRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *SetNetmapStatusRequest) GetBody() *SetNetmapStatusRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *SetNetmapStatusRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Set netmap status response.
+type SetNetmapStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of set netmap status response message.
+ Body *SetNetmapStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *SetNetmapStatusResponse) Reset() {
+ *x = SetNetmapStatusResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetNetmapStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetNetmapStatusResponse) ProtoMessage() {}
+
+func (x *SetNetmapStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetNetmapStatusResponse.ProtoReflect.Descriptor instead.
+func (*SetNetmapStatusResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SetNetmapStatusResponse) GetBody() *SetNetmapStatusResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *SetNetmapStatusResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Request to drop the objects.
+type DropObjectsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of the request message.
+ Body *DropObjectsRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *DropObjectsRequest) Reset() {
+ *x = DropObjectsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DropObjectsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DropObjectsRequest) ProtoMessage() {}
+
+func (x *DropObjectsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DropObjectsRequest.ProtoReflect.Descriptor instead.
+func (*DropObjectsRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *DropObjectsRequest) GetBody() *DropObjectsRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *DropObjectsRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Response to request to drop the objects.
+type DropObjectsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of the response message.
+ Body *DropObjectsResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *DropObjectsResponse) Reset() {
+ *x = DropObjectsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DropObjectsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DropObjectsResponse) ProtoMessage() {}
+
+func (x *DropObjectsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DropObjectsResponse.ProtoReflect.Descriptor instead.
+func (*DropObjectsResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DropObjectsResponse) GetBody() *DropObjectsResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *DropObjectsResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Request to list all shards of the node.
+type ListShardsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of the request message.
+ Body *ListShardsRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *ListShardsRequest) Reset() {
+ *x = ListShardsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListShardsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListShardsRequest) ProtoMessage() {}
+
+func (x *ListShardsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListShardsRequest.ProtoReflect.Descriptor instead.
+func (*ListShardsRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ListShardsRequest) GetBody() *ListShardsRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *ListShardsRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// ListShards response.
+type ListShardsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of the response message.
+ Body *ListShardsResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *ListShardsResponse) Reset() {
+ *x = ListShardsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListShardsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListShardsResponse) ProtoMessage() {}
+
+func (x *ListShardsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListShardsResponse.ProtoReflect.Descriptor instead.
+func (*ListShardsResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ListShardsResponse) GetBody() *ListShardsResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *ListShardsResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Request to set mode of the shard.
+type SetShardModeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of set shard mode request message.
+ Body *SetShardModeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *SetShardModeRequest) Reset() {
+ *x = SetShardModeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetShardModeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetShardModeRequest) ProtoMessage() {}
+
+func (x *SetShardModeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetShardModeRequest.ProtoReflect.Descriptor instead.
+func (*SetShardModeRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *SetShardModeRequest) GetBody() *SetShardModeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *SetShardModeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// SetShardMode response.
+type SetShardModeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of set shard mode response message.
+ Body *SetShardModeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *SetShardModeResponse) Reset() {
+ *x = SetShardModeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetShardModeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetShardModeResponse) ProtoMessage() {}
+
+func (x *SetShardModeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetShardModeResponse.ProtoReflect.Descriptor instead.
+func (*SetShardModeResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *SetShardModeResponse) GetBody() *SetShardModeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *SetShardModeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// SynchronizeTree request.
+type SynchronizeTreeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of restore shard request message.
+ Body *SynchronizeTreeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *SynchronizeTreeRequest) Reset() {
+ *x = SynchronizeTreeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SynchronizeTreeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SynchronizeTreeRequest) ProtoMessage() {}
+
+func (x *SynchronizeTreeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SynchronizeTreeRequest.ProtoReflect.Descriptor instead.
+func (*SynchronizeTreeRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *SynchronizeTreeRequest) GetBody() *SynchronizeTreeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *SynchronizeTreeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// SynchronizeTree response.
+type SynchronizeTreeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Body of restore shard response message.
+ Body *SynchronizeTreeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Body signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *SynchronizeTreeResponse) Reset() {
+ *x = SynchronizeTreeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SynchronizeTreeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SynchronizeTreeResponse) ProtoMessage() {}
+
+func (x *SynchronizeTreeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SynchronizeTreeResponse.ProtoReflect.Descriptor instead.
+func (*SynchronizeTreeResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *SynchronizeTreeResponse) GetBody() *SynchronizeTreeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *SynchronizeTreeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// EvacuateShard request.
+type EvacuateShardRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *EvacuateShardRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *EvacuateShardRequest) Reset() {
+ *x = EvacuateShardRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvacuateShardRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvacuateShardRequest) ProtoMessage() {}
+
+func (x *EvacuateShardRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvacuateShardRequest.ProtoReflect.Descriptor instead.
+func (*EvacuateShardRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *EvacuateShardRequest) GetBody() *EvacuateShardRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *EvacuateShardRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// EvacuateShard response.
+type EvacuateShardResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *EvacuateShardResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *EvacuateShardResponse) Reset() {
+ *x = EvacuateShardResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvacuateShardResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvacuateShardResponse) ProtoMessage() {}
+
+func (x *EvacuateShardResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvacuateShardResponse.ProtoReflect.Descriptor instead.
+func (*EvacuateShardResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *EvacuateShardResponse) GetBody() *EvacuateShardResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *EvacuateShardResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// FlushCache request.
+type FlushCacheRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *FlushCacheRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *FlushCacheRequest) Reset() {
+ *x = FlushCacheRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FlushCacheRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FlushCacheRequest) ProtoMessage() {}
+
+func (x *FlushCacheRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FlushCacheRequest.ProtoReflect.Descriptor instead.
+func (*FlushCacheRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *FlushCacheRequest) GetBody() *FlushCacheRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *FlushCacheRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// FlushCache response.
+type FlushCacheResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *FlushCacheResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *FlushCacheResponse) Reset() {
+ *x = FlushCacheResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FlushCacheResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FlushCacheResponse) ProtoMessage() {}
+
+func (x *FlushCacheResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FlushCacheResponse.ProtoReflect.Descriptor instead.
+func (*FlushCacheResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *FlushCacheResponse) GetBody() *FlushCacheResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *FlushCacheResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Doctor request.
+type DoctorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *DoctorRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *DoctorRequest) Reset() {
+ *x = DoctorRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoctorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoctorRequest) ProtoMessage() {}
+
+func (x *DoctorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoctorRequest.ProtoReflect.Descriptor instead.
+func (*DoctorRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *DoctorRequest) GetBody() *DoctorRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *DoctorRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Doctor response.
+type DoctorResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *DoctorResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *DoctorResponse) Reset() {
+ *x = DoctorResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoctorResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoctorResponse) ProtoMessage() {}
+
+func (x *DoctorResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoctorResponse.ProtoReflect.Descriptor instead.
+func (*DoctorResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *DoctorResponse) GetBody() *DoctorResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *DoctorResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// StartShardEvacuation request.
+type StartShardEvacuationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *StartShardEvacuationRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *StartShardEvacuationRequest) Reset() {
+ *x = StartShardEvacuationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StartShardEvacuationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StartShardEvacuationRequest) ProtoMessage() {}
+
+func (x *StartShardEvacuationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StartShardEvacuationRequest.ProtoReflect.Descriptor instead.
+func (*StartShardEvacuationRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *StartShardEvacuationRequest) GetBody() *StartShardEvacuationRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *StartShardEvacuationRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// StartShardEvacuation response.
+type StartShardEvacuationResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *StartShardEvacuationResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *StartShardEvacuationResponse) Reset() {
+ *x = StartShardEvacuationResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StartShardEvacuationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StartShardEvacuationResponse) ProtoMessage() {}
+
+func (x *StartShardEvacuationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StartShardEvacuationResponse.ProtoReflect.Descriptor instead.
+func (*StartShardEvacuationResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *StartShardEvacuationResponse) GetBody() *StartShardEvacuationResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *StartShardEvacuationResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// GetShardEvacuationStatus request.
+type GetShardEvacuationStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *GetShardEvacuationStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *GetShardEvacuationStatusRequest) Reset() {
+ *x = GetShardEvacuationStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetShardEvacuationStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetShardEvacuationStatusRequest) ProtoMessage() {}
+
+func (x *GetShardEvacuationStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetShardEvacuationStatusRequest.ProtoReflect.Descriptor instead.
+func (*GetShardEvacuationStatusRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *GetShardEvacuationStatusRequest) GetBody() *GetShardEvacuationStatusRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *GetShardEvacuationStatusRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// GetShardEvacuationStatus response.
+type GetShardEvacuationStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *GetShardEvacuationStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *GetShardEvacuationStatusResponse) Reset() {
+ *x = GetShardEvacuationStatusResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetShardEvacuationStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetShardEvacuationStatusResponse) ProtoMessage() {}
+
+func (x *GetShardEvacuationStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetShardEvacuationStatusResponse.ProtoReflect.Descriptor instead.
+func (*GetShardEvacuationStatusResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *GetShardEvacuationStatusResponse) GetBody() *GetShardEvacuationStatusResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *GetShardEvacuationStatusResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// StopShardEvacuation request.
+type StopShardEvacuationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *StopShardEvacuationRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *StopShardEvacuationRequest) Reset() {
+ *x = StopShardEvacuationRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StopShardEvacuationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StopShardEvacuationRequest) ProtoMessage() {}
+
+func (x *StopShardEvacuationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StopShardEvacuationRequest.ProtoReflect.Descriptor instead.
+func (*StopShardEvacuationRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *StopShardEvacuationRequest) GetBody() *StopShardEvacuationRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *StopShardEvacuationRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// StopShardEvacuation response.
+type StopShardEvacuationResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *StopShardEvacuationResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *StopShardEvacuationResponse) Reset() {
+ *x = StopShardEvacuationResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StopShardEvacuationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StopShardEvacuationResponse) ProtoMessage() {}
+
+func (x *StopShardEvacuationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StopShardEvacuationResponse.ProtoReflect.Descriptor instead.
+func (*StopShardEvacuationResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *StopShardEvacuationResponse) GetBody() *StopShardEvacuationResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *StopShardEvacuationResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Health check request body.
+type HealthCheckRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *HealthCheckRequest_Body) Reset() {
+ *x = HealthCheckRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckRequest_Body) ProtoMessage() {}
+
+func (x *HealthCheckRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckRequest_Body.ProtoReflect.Descriptor instead.
+func (*HealthCheckRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Health check response body
+type HealthCheckResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Status of the storage node in FrostFS network map.
+ NetmapStatus NetmapStatus `protobuf:"varint,1,opt,name=netmap_status,json=netmapStatus,proto3,enum=control.NetmapStatus" json:"netmap_status,omitempty"`
+ // Health status of storage node application.
+ HealthStatus HealthStatus `protobuf:"varint,2,opt,name=health_status,json=healthStatus,proto3,enum=control.HealthStatus" json:"health_status,omitempty"`
+}
+
+func (x *HealthCheckResponse_Body) Reset() {
+ *x = HealthCheckResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckResponse_Body) ProtoMessage() {}
+
+func (x *HealthCheckResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckResponse_Body.ProtoReflect.Descriptor instead.
+func (*HealthCheckResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *HealthCheckResponse_Body) GetNetmapStatus() NetmapStatus {
+ if x != nil {
+ return x.NetmapStatus
+ }
+ return NetmapStatus_STATUS_UNDEFINED
+}
+
+func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
+ if x != nil {
+ return x.HealthStatus
+ }
+ return HealthStatus_HEALTH_STATUS_UNDEFINED
+}
+
+// Set netmap status request body.
+type SetNetmapStatusRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // New storage node status in FrostFS network map.
+ // If status is MAINTENANCE, the node checks whether maintenance is
+ // allowed in the network settings. In case of prohibition, the request
+ // is denied. Otherwise, node switches to local maintenance state. To
+ // force local maintenance, use `force_maintenance` flag.
+ Status NetmapStatus `protobuf:"varint,1,opt,name=status,proto3,enum=control.NetmapStatus" json:"status,omitempty"`
+ // MAINTENANCE status validation skip flag. If set, node starts local
+ // maintenance regardless of network settings. The flag MUST NOT be
+ // set for any other status.
+ ForceMaintenance bool `protobuf:"varint,2,opt,name=force_maintenance,json=forceMaintenance,proto3" json:"force_maintenance,omitempty"`
+}
+
+func (x *SetNetmapStatusRequest_Body) Reset() {
+ *x = SetNetmapStatusRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetNetmapStatusRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetNetmapStatusRequest_Body) ProtoMessage() {}
+
+func (x *SetNetmapStatusRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetNetmapStatusRequest_Body.ProtoReflect.Descriptor instead.
+func (*SetNetmapStatusRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *SetNetmapStatusRequest_Body) GetStatus() NetmapStatus {
+ if x != nil {
+ return x.Status
+ }
+ return NetmapStatus_STATUS_UNDEFINED
+}
+
+func (x *SetNetmapStatusRequest_Body) GetForceMaintenance() bool {
+ if x != nil {
+ return x.ForceMaintenance
+ }
+ return false
+}
+
+// Set netmap status response body
+type SetNetmapStatusResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *SetNetmapStatusResponse_Body) Reset() {
+ *x = SetNetmapStatusResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetNetmapStatusResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetNetmapStatusResponse_Body) ProtoMessage() {}
+
+func (x *SetNetmapStatusResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetNetmapStatusResponse_Body.ProtoReflect.Descriptor instead.
+func (*SetNetmapStatusResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{3, 0}
+}
+
+// Request body structure.
+type DropObjectsRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // List of object addresses to be removed.
+ // in FrostFS API binary format.
+ AddressList [][]byte `protobuf:"bytes,1,rep,name=address_list,json=addressList,proto3" json:"address_list,omitempty"`
+}
+
+func (x *DropObjectsRequest_Body) Reset() {
+ *x = DropObjectsRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DropObjectsRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DropObjectsRequest_Body) ProtoMessage() {}
+
+func (x *DropObjectsRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DropObjectsRequest_Body.ProtoReflect.Descriptor instead.
+func (*DropObjectsRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *DropObjectsRequest_Body) GetAddressList() [][]byte {
+ if x != nil {
+ return x.AddressList
+ }
+ return nil
+}
+
+// Response body structure.
+type DropObjectsResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *DropObjectsResponse_Body) Reset() {
+ *x = DropObjectsResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DropObjectsResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DropObjectsResponse_Body) ProtoMessage() {}
+
+func (x *DropObjectsResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DropObjectsResponse_Body.ProtoReflect.Descriptor instead.
+func (*DropObjectsResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{5, 0}
+}
+
+// Request body structure.
+type ListShardsRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ListShardsRequest_Body) Reset() {
+ *x = ListShardsRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListShardsRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListShardsRequest_Body) ProtoMessage() {}
+
+func (x *ListShardsRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListShardsRequest_Body.ProtoReflect.Descriptor instead.
+func (*ListShardsRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{6, 0}
+}
+
+// Response body structure.
+type ListShardsResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // List of the node's shards.
+ Shards []*ShardInfo `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"`
+}
+
+func (x *ListShardsResponse_Body) Reset() {
+ *x = ListShardsResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListShardsResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListShardsResponse_Body) ProtoMessage() {}
+
+func (x *ListShardsResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListShardsResponse_Body.ProtoReflect.Descriptor instead.
+func (*ListShardsResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{7, 0}
+}
+
+func (x *ListShardsResponse_Body) GetShards() []*ShardInfo {
+ if x != nil {
+ return x.Shards
+ }
+ return nil
+}
+
+// Request body structure.
+type SetShardModeRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // ID of the shard.
+ Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
+ // Mode that requested to be set.
+ Mode ShardMode `protobuf:"varint,2,opt,name=mode,proto3,enum=control.ShardMode" json:"mode,omitempty"`
+ // Flag signifying whether error counter should be set to 0.
+ ResetErrorCounter bool `protobuf:"varint,3,opt,name=resetErrorCounter,proto3" json:"resetErrorCounter,omitempty"`
+}
+
+func (x *SetShardModeRequest_Body) Reset() {
+ *x = SetShardModeRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetShardModeRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetShardModeRequest_Body) ProtoMessage() {}
+
+func (x *SetShardModeRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetShardModeRequest_Body.ProtoReflect.Descriptor instead.
+func (*SetShardModeRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (x *SetShardModeRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+
+func (x *SetShardModeRequest_Body) GetMode() ShardMode {
+ if x != nil {
+ return x.Mode
+ }
+ return ShardMode_SHARD_MODE_UNDEFINED
+}
+
+func (x *SetShardModeRequest_Body) GetResetErrorCounter() bool {
+ if x != nil {
+ return x.ResetErrorCounter
+ }
+ return false
+}
+
+// Response body structure.
+type SetShardModeResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *SetShardModeResponse_Body) Reset() {
+ *x = SetShardModeResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SetShardModeResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SetShardModeResponse_Body) ProtoMessage() {}
+
+func (x *SetShardModeResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SetShardModeResponse_Body.ProtoReflect.Descriptor instead.
+func (*SetShardModeResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{9, 0}
+}
+
+// Request body structure.
+type SynchronizeTreeRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
+ // Starting height for the synchronization. Can be omitted.
+ Height uint64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
+}
+
+func (x *SynchronizeTreeRequest_Body) Reset() {
+ *x = SynchronizeTreeRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SynchronizeTreeRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SynchronizeTreeRequest_Body) ProtoMessage() {}
+
+func (x *SynchronizeTreeRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SynchronizeTreeRequest_Body.ProtoReflect.Descriptor instead.
+func (*SynchronizeTreeRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10, 0}
+}
+
+func (x *SynchronizeTreeRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+
+func (x *SynchronizeTreeRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+
+func (x *SynchronizeTreeRequest_Body) GetHeight() uint64 {
+ if x != nil {
+ return x.Height
+ }
+ return 0
+}
+
+// Response body structure.
+type SynchronizeTreeResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *SynchronizeTreeResponse_Body) Reset() {
+ *x = SynchronizeTreeResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SynchronizeTreeResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SynchronizeTreeResponse_Body) ProtoMessage() {}
+
+func (x *SynchronizeTreeResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SynchronizeTreeResponse_Body.ProtoReflect.Descriptor instead.
+func (*SynchronizeTreeResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11, 0}
+}
+
+// Request body structure.
+type EvacuateShardRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // ID of the shard.
+ Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
+ // Flag indicating whether object read errors should be ignored.
+ IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
+}
+
+func (x *EvacuateShardRequest_Body) Reset() {
+ *x = EvacuateShardRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvacuateShardRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvacuateShardRequest_Body) ProtoMessage() {}
+
+func (x *EvacuateShardRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvacuateShardRequest_Body.ProtoReflect.Descriptor instead.
+func (*EvacuateShardRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12, 0}
+}
+
+func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+
+func (x *EvacuateShardRequest_Body) GetIgnoreErrors() bool {
+ if x != nil {
+ return x.IgnoreErrors
+ }
+ return false
+}
+
+// Response body structure.
+type EvacuateShardResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Count uint32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (x *EvacuateShardResponse_Body) Reset() {
+ *x = EvacuateShardResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvacuateShardResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvacuateShardResponse_Body) ProtoMessage() {}
+
+func (x *EvacuateShardResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvacuateShardResponse_Body.ProtoReflect.Descriptor instead.
+func (*EvacuateShardResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13, 0}
+}
+
+func (x *EvacuateShardResponse_Body) GetCount() uint32 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+// Request body structure.
+type FlushCacheRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // ID of the shard.
+ Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
+}
+
+func (x *FlushCacheRequest_Body) Reset() {
+ *x = FlushCacheRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FlushCacheRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FlushCacheRequest_Body) ProtoMessage() {}
+
+func (x *FlushCacheRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FlushCacheRequest_Body.ProtoReflect.Descriptor instead.
+func (*FlushCacheRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14, 0}
+}
+
+func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+
+// Response body structure.
+type FlushCacheResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *FlushCacheResponse_Body) Reset() {
+ *x = FlushCacheResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FlushCacheResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FlushCacheResponse_Body) ProtoMessage() {}
+
+func (x *FlushCacheResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[39]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FlushCacheResponse_Body.ProtoReflect.Descriptor instead.
+func (*FlushCacheResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15, 0}
+}
+
+// Request body structure.
+type DoctorRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Number of threads to use for the operation.
+ Concurrency uint32 `protobuf:"varint,1,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
+ // Flag to search engine for duplicate objects and leave only one copy.
+ RemoveDuplicates bool `protobuf:"varint,2,opt,name=remove_duplicates,json=removeDuplicates,proto3" json:"remove_duplicates,omitempty"`
+}
+
+func (x *DoctorRequest_Body) Reset() {
+ *x = DoctorRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoctorRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoctorRequest_Body) ProtoMessage() {}
+
+func (x *DoctorRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[40]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoctorRequest_Body.ProtoReflect.Descriptor instead.
+func (*DoctorRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16, 0}
+}
+
+func (x *DoctorRequest_Body) GetConcurrency() uint32 {
+ if x != nil {
+ return x.Concurrency
+ }
+ return 0
+}
+
+func (x *DoctorRequest_Body) GetRemoveDuplicates() bool {
+ if x != nil {
+ return x.RemoveDuplicates
+ }
+ return false
+}
+
+// Response body structure.
+type DoctorResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *DoctorResponse_Body) Reset() {
+ *x = DoctorResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoctorResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoctorResponse_Body) ProtoMessage() {}
+
+func (x *DoctorResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[41]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoctorResponse_Body.ProtoReflect.Descriptor instead.
+func (*DoctorResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17, 0}
+}
+
+// Request body structure.
+type StartShardEvacuationRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // IDs of the shards.
+ Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
+ // Flag indicating whether object read errors should be ignored.
+ IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
+}
+
+func (x *StartShardEvacuationRequest_Body) Reset() {
+ *x = StartShardEvacuationRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StartShardEvacuationRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StartShardEvacuationRequest_Body) ProtoMessage() {}
+
+func (x *StartShardEvacuationRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[42]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StartShardEvacuationRequest_Body.ProtoReflect.Descriptor instead.
+func (*StartShardEvacuationRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18, 0}
+}
+
+func (x *StartShardEvacuationRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+
+func (x *StartShardEvacuationRequest_Body) GetIgnoreErrors() bool {
+ if x != nil {
+ return x.IgnoreErrors
+ }
+ return false
+}
+
+// Response body structure.
+type StartShardEvacuationResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *StartShardEvacuationResponse_Body) Reset() {
+ *x = StartShardEvacuationResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StartShardEvacuationResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StartShardEvacuationResponse_Body) ProtoMessage() {}
+
+func (x *StartShardEvacuationResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[43]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StartShardEvacuationResponse_Body.ProtoReflect.Descriptor instead.
+func (*StartShardEvacuationResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19, 0}
+}
+
+// Request body structure.
+type GetShardEvacuationStatusRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *GetShardEvacuationStatusRequest_Body) Reset() {
+ *x = GetShardEvacuationStatusRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetShardEvacuationStatusRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetShardEvacuationStatusRequest_Body) ProtoMessage() {}
+
+func (x *GetShardEvacuationStatusRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[44]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetShardEvacuationStatusRequest_Body.ProtoReflect.Descriptor instead.
+func (*GetShardEvacuationStatusRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20, 0}
+}
+
+// Response body structure.
+type GetShardEvacuationStatusResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Total objects to evacuate count. The value is approximate, so evacuated + failed == total is not guaranteed after completion.
+ Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
+ // Evacuated objects count.
+ Evacuated uint64 `protobuf:"varint,2,opt,name=evacuated,proto3" json:"evacuated,omitempty"`
+ // Failed objects count.
+ Failed uint64 `protobuf:"varint,3,opt,name=failed,proto3" json:"failed,omitempty"`
+ // Shard IDs.
+ Shard_ID [][]byte `protobuf:"bytes,4,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
+ // Evacuation process status.
+ Status GetShardEvacuationStatusResponse_Body_Status `protobuf:"varint,5,opt,name=status,proto3,enum=control.GetShardEvacuationStatusResponse_Body_Status" json:"status,omitempty"`
+ // Evacuation process duration.
+ Duration *GetShardEvacuationStatusResponse_Body_Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"`
+ // Evacuation process started at timestamp.
+ StartedAt *GetShardEvacuationStatusResponse_Body_UnixTimestamp `protobuf:"bytes,7,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"`
+ // Error message if evacuation failed.
+ ErrorMessage string `protobuf:"bytes,8,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) Reset() {
+ *x = GetShardEvacuationStatusResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[45]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetShardEvacuationStatusResponse_Body) ProtoMessage() {}
+
+func (x *GetShardEvacuationStatusResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[45]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetShardEvacuationStatusResponse_Body.ProtoReflect.Descriptor instead.
+func (*GetShardEvacuationStatusResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21, 0}
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) GetTotal() uint64 {
+ if x != nil {
+ return x.Total
+ }
+ return 0
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) GetEvacuated() uint64 {
+ if x != nil {
+ return x.Evacuated
+ }
+ return 0
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) GetFailed() uint64 {
+ if x != nil {
+ return x.Failed
+ }
+ return 0
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) GetStatus() GetShardEvacuationStatusResponse_Body_Status {
+ if x != nil {
+ return x.Status
+ }
+ return GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) GetDuration() *GetShardEvacuationStatusResponse_Body_Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) GetStartedAt() *GetShardEvacuationStatusResponse_Body_UnixTimestamp {
+ if x != nil {
+ return x.StartedAt
+ }
+ return nil
+}
+
+func (x *GetShardEvacuationStatusResponse_Body) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
+
+// Unix timestamp value.
+type GetShardEvacuationStatusResponse_Body_UnixTimestamp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) Reset() {
+ *x = GetShardEvacuationStatusResponse_Body_UnixTimestamp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[46]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetShardEvacuationStatusResponse_Body_UnixTimestamp) ProtoMessage() {}
+
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[46]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetShardEvacuationStatusResponse_Body_UnixTimestamp.ProtoReflect.Descriptor instead.
+func (*GetShardEvacuationStatusResponse_Body_UnixTimestamp) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21, 0, 0}
+}
+
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+// Duration in seconds.
+type GetShardEvacuationStatusResponse_Body_Duration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+}
+
+func (x *GetShardEvacuationStatusResponse_Body_Duration) Reset() {
+ *x = GetShardEvacuationStatusResponse_Body_Duration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[47]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetShardEvacuationStatusResponse_Body_Duration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetShardEvacuationStatusResponse_Body_Duration) ProtoMessage() {}
+
+func (x *GetShardEvacuationStatusResponse_Body_Duration) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[47]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetShardEvacuationStatusResponse_Body_Duration.ProtoReflect.Descriptor instead.
+func (*GetShardEvacuationStatusResponse_Body_Duration) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21, 0, 1}
+}
+
+func (x *GetShardEvacuationStatusResponse_Body_Duration) GetSeconds() int64 {
+ if x != nil {
+ return x.Seconds
+ }
+ return 0
+}
+
+// Request body structure.
+type StopShardEvacuationRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *StopShardEvacuationRequest_Body) Reset() {
+ *x = StopShardEvacuationRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[48]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StopShardEvacuationRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StopShardEvacuationRequest_Body) ProtoMessage() {}
+
+func (x *StopShardEvacuationRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[48]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StopShardEvacuationRequest_Body.ProtoReflect.Descriptor instead.
+func (*StopShardEvacuationRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{22, 0}
+}
+
+// Response body structure.
+type StopShardEvacuationResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *StopShardEvacuationResponse_Body) Reset() {
+ *x = StopShardEvacuationResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[49]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StopShardEvacuationResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StopShardEvacuationResponse_Body) ProtoMessage() {}
+
+func (x *StopShardEvacuationResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[49]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StopShardEvacuationResponse_Body.ProtoReflect.Descriptor instead.
+func (*StopShardEvacuationResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23, 0}
+}
+
+var File_pkg_services_control_service_proto protoreflect.FileDescriptor
+
+var file_pkg_services_control_service_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, 0x20, 0x70,
+ 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x84, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
+ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
+ 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xfe, 0x01, 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35,
+ 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
+ 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
+ 0x3a, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6e,
+ 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x68,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x74, 0x4e,
+ 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65,
+ 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
+ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x62,
+ 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x6d,
+ 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x10, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e,
+ 0x63, 0x65, 0x22, 0x8e, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39,
+ 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
+ 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42,
+ 0x6f, 0x64, 0x79, 0x22, 0xa7, 0x01, 0x0a, 0x12, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x1a, 0x29, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
+ 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x86, 0x01,
+ 0x0a, 0x13, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72,
+ 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
+ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
+ 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x82, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x04,
+ 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
+ 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xb0, 0x01, 0x0a, 0x12,
+ 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
+ 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x32, 0x0a, 0x04, 0x42, 0x6f,
+ 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0xf7,
+ 0x01, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
+ 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
+ 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
+ 0x77, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x49, 0x44, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x72, 0x65,
+ 0x73, 0x65, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x72, 0x65, 0x73, 0x65, 0x74, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x88, 0x01, 0x0a, 0x14, 0x53, 0x65, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
+ 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42,
+ 0x6f, 0x64, 0x79, 0x22, 0xe0, 0x01, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e,
+ 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38,
+ 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69,
+ 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
+ 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x5a, 0x0a, 0x04, 0x42, 0x6f,
+ 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+ 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x16,
+ 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06,
+ 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x17, 0x53, 0x79, 0x6e, 0x63, 0x68,
+ 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x39, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68,
+ 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
+ 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
+ 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc8, 0x01, 0x0a, 0x14, 0x45, 0x76, 0x61, 0x63,
+ 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
+ 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x46, 0x0a, 0x04, 0x42, 0x6f,
+ 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a,
+ 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04,
+ 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
+ 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x1c, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x11, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43,
+ 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x04, 0x62,
+ 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43,
+ 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04,
+ 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc9, 0x01, 0x0a,
+ 0x0d, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f,
+ 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
+ 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x1a, 0x55, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e,
+ 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b,
+ 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x72,
+ 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x75,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x7c, 0x0a, 0x0e, 0x44, 0x6f, 0x63, 0x74,
+ 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
+ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
+ 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xd6, 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
+ 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
+ 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x46, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
+ 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67,
+ 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22,
+ 0x98, 0x01, 0x0a, 0x1c, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
+ 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x3e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x9e, 0x01, 0x0a, 0x1f, 0x47,
+ 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41,
+ 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
+ 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
+ 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc6, 0x05, 0x0a, 0x20,
+ 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x42, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
+ 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xab, 0x04, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
+ 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x65, 0x76, 0x61, 0x63, 0x75, 0x61,
+ 0x74, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x4d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x53, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x55,
+ 0x6e, 0x69, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x25, 0x0a, 0x0d,
+ 0x55, 0x6e, 0x69, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x1a, 0x24, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x49, 0x0a, 0x06, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x1f, 0x45, 0x56, 0x41, 0x43, 0x55, 0x41, 0x54, 0x45, 0x5f,
+ 0x53, 0x48, 0x41, 0x52, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44,
+ 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e,
+ 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54,
+ 0x45, 0x44, 0x10, 0x02, 0x22, 0x94, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
+ 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x96, 0x01, 0x0a, 0x1b,
+ 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x04, 0x62,
+ 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61,
+ 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
+ 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04,
+ 0x42, 0x6f, 0x64, 0x79, 0x32, 0xee, 0x07, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
+ 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x44, 0x72, 0x6f, 0x70, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72,
+ 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12,
+ 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
+ 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72,
+ 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54,
+ 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x45,
+ 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x14, 0x53,
+ 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74,
+ 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
+ 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x6f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63,
+ 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
+ 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x60, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
+ 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63,
+ 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68,
+ 0x65, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73,
+ 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63,
+ 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f,
+ 0x63, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44,
+ 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f,
+ 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e,
+ 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_services_control_service_proto_rawDescOnce sync.Once
+ file_pkg_services_control_service_proto_rawDescData = file_pkg_services_control_service_proto_rawDesc
+)
+
+func file_pkg_services_control_service_proto_rawDescGZIP() []byte {
+ file_pkg_services_control_service_proto_rawDescOnce.Do(func() {
+ file_pkg_services_control_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_service_proto_rawDescData)
+ })
+ return file_pkg_services_control_service_proto_rawDescData
+}
+
+var file_pkg_services_control_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_pkg_services_control_service_proto_msgTypes = make([]protoimpl.MessageInfo, 50)
+var file_pkg_services_control_service_proto_goTypes = []interface{}{
+ (GetShardEvacuationStatusResponse_Body_Status)(0), // 0: control.GetShardEvacuationStatusResponse.Body.Status
+ (*HealthCheckRequest)(nil), // 1: control.HealthCheckRequest
+ (*HealthCheckResponse)(nil), // 2: control.HealthCheckResponse
+ (*SetNetmapStatusRequest)(nil), // 3: control.SetNetmapStatusRequest
+ (*SetNetmapStatusResponse)(nil), // 4: control.SetNetmapStatusResponse
+ (*DropObjectsRequest)(nil), // 5: control.DropObjectsRequest
+ (*DropObjectsResponse)(nil), // 6: control.DropObjectsResponse
+ (*ListShardsRequest)(nil), // 7: control.ListShardsRequest
+ (*ListShardsResponse)(nil), // 8: control.ListShardsResponse
+ (*SetShardModeRequest)(nil), // 9: control.SetShardModeRequest
+ (*SetShardModeResponse)(nil), // 10: control.SetShardModeResponse
+ (*SynchronizeTreeRequest)(nil), // 11: control.SynchronizeTreeRequest
+ (*SynchronizeTreeResponse)(nil), // 12: control.SynchronizeTreeResponse
+ (*EvacuateShardRequest)(nil), // 13: control.EvacuateShardRequest
+ (*EvacuateShardResponse)(nil), // 14: control.EvacuateShardResponse
+ (*FlushCacheRequest)(nil), // 15: control.FlushCacheRequest
+ (*FlushCacheResponse)(nil), // 16: control.FlushCacheResponse
+ (*DoctorRequest)(nil), // 17: control.DoctorRequest
+ (*DoctorResponse)(nil), // 18: control.DoctorResponse
+ (*StartShardEvacuationRequest)(nil), // 19: control.StartShardEvacuationRequest
+ (*StartShardEvacuationResponse)(nil), // 20: control.StartShardEvacuationResponse
+ (*GetShardEvacuationStatusRequest)(nil), // 21: control.GetShardEvacuationStatusRequest
+ (*GetShardEvacuationStatusResponse)(nil), // 22: control.GetShardEvacuationStatusResponse
+ (*StopShardEvacuationRequest)(nil), // 23: control.StopShardEvacuationRequest
+ (*StopShardEvacuationResponse)(nil), // 24: control.StopShardEvacuationResponse
+ (*HealthCheckRequest_Body)(nil), // 25: control.HealthCheckRequest.Body
+ (*HealthCheckResponse_Body)(nil), // 26: control.HealthCheckResponse.Body
+ (*SetNetmapStatusRequest_Body)(nil), // 27: control.SetNetmapStatusRequest.Body
+ (*SetNetmapStatusResponse_Body)(nil), // 28: control.SetNetmapStatusResponse.Body
+ (*DropObjectsRequest_Body)(nil), // 29: control.DropObjectsRequest.Body
+ (*DropObjectsResponse_Body)(nil), // 30: control.DropObjectsResponse.Body
+ (*ListShardsRequest_Body)(nil), // 31: control.ListShardsRequest.Body
+ (*ListShardsResponse_Body)(nil), // 32: control.ListShardsResponse.Body
+ (*SetShardModeRequest_Body)(nil), // 33: control.SetShardModeRequest.Body
+ (*SetShardModeResponse_Body)(nil), // 34: control.SetShardModeResponse.Body
+ (*SynchronizeTreeRequest_Body)(nil), // 35: control.SynchronizeTreeRequest.Body
+ (*SynchronizeTreeResponse_Body)(nil), // 36: control.SynchronizeTreeResponse.Body
+ (*EvacuateShardRequest_Body)(nil), // 37: control.EvacuateShardRequest.Body
+ (*EvacuateShardResponse_Body)(nil), // 38: control.EvacuateShardResponse.Body
+ (*FlushCacheRequest_Body)(nil), // 39: control.FlushCacheRequest.Body
+ (*FlushCacheResponse_Body)(nil), // 40: control.FlushCacheResponse.Body
+ (*DoctorRequest_Body)(nil), // 41: control.DoctorRequest.Body
+ (*DoctorResponse_Body)(nil), // 42: control.DoctorResponse.Body
+ (*StartShardEvacuationRequest_Body)(nil), // 43: control.StartShardEvacuationRequest.Body
+ (*StartShardEvacuationResponse_Body)(nil), // 44: control.StartShardEvacuationResponse.Body
+ (*GetShardEvacuationStatusRequest_Body)(nil), // 45: control.GetShardEvacuationStatusRequest.Body
+ (*GetShardEvacuationStatusResponse_Body)(nil), // 46: control.GetShardEvacuationStatusResponse.Body
+ (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil), // 47: control.GetShardEvacuationStatusResponse.Body.UnixTimestamp
+ (*GetShardEvacuationStatusResponse_Body_Duration)(nil), // 48: control.GetShardEvacuationStatusResponse.Body.Duration
+ (*StopShardEvacuationRequest_Body)(nil), // 49: control.StopShardEvacuationRequest.Body
+ (*StopShardEvacuationResponse_Body)(nil), // 50: control.StopShardEvacuationResponse.Body
+ (*Signature)(nil), // 51: control.Signature
+ (NetmapStatus)(0), // 52: control.NetmapStatus
+ (HealthStatus)(0), // 53: control.HealthStatus
+ (*ShardInfo)(nil), // 54: control.ShardInfo
+ (ShardMode)(0), // 55: control.ShardMode
+}
+var file_pkg_services_control_service_proto_depIdxs = []int32{
+ 25, // 0: control.HealthCheckRequest.body:type_name -> control.HealthCheckRequest.Body
+ 51, // 1: control.HealthCheckRequest.signature:type_name -> control.Signature
+ 26, // 2: control.HealthCheckResponse.body:type_name -> control.HealthCheckResponse.Body
+ 51, // 3: control.HealthCheckResponse.signature:type_name -> control.Signature
+ 27, // 4: control.SetNetmapStatusRequest.body:type_name -> control.SetNetmapStatusRequest.Body
+ 51, // 5: control.SetNetmapStatusRequest.signature:type_name -> control.Signature
+ 28, // 6: control.SetNetmapStatusResponse.body:type_name -> control.SetNetmapStatusResponse.Body
+ 51, // 7: control.SetNetmapStatusResponse.signature:type_name -> control.Signature
+ 29, // 8: control.DropObjectsRequest.body:type_name -> control.DropObjectsRequest.Body
+ 51, // 9: control.DropObjectsRequest.signature:type_name -> control.Signature
+ 30, // 10: control.DropObjectsResponse.body:type_name -> control.DropObjectsResponse.Body
+ 51, // 11: control.DropObjectsResponse.signature:type_name -> control.Signature
+ 31, // 12: control.ListShardsRequest.body:type_name -> control.ListShardsRequest.Body
+ 51, // 13: control.ListShardsRequest.signature:type_name -> control.Signature
+ 32, // 14: control.ListShardsResponse.body:type_name -> control.ListShardsResponse.Body
+ 51, // 15: control.ListShardsResponse.signature:type_name -> control.Signature
+ 33, // 16: control.SetShardModeRequest.body:type_name -> control.SetShardModeRequest.Body
+ 51, // 17: control.SetShardModeRequest.signature:type_name -> control.Signature
+ 34, // 18: control.SetShardModeResponse.body:type_name -> control.SetShardModeResponse.Body
+ 51, // 19: control.SetShardModeResponse.signature:type_name -> control.Signature
+ 35, // 20: control.SynchronizeTreeRequest.body:type_name -> control.SynchronizeTreeRequest.Body
+ 51, // 21: control.SynchronizeTreeRequest.signature:type_name -> control.Signature
+ 36, // 22: control.SynchronizeTreeResponse.body:type_name -> control.SynchronizeTreeResponse.Body
+ 51, // 23: control.SynchronizeTreeResponse.signature:type_name -> control.Signature
+ 37, // 24: control.EvacuateShardRequest.body:type_name -> control.EvacuateShardRequest.Body
+ 51, // 25: control.EvacuateShardRequest.signature:type_name -> control.Signature
+ 38, // 26: control.EvacuateShardResponse.body:type_name -> control.EvacuateShardResponse.Body
+ 51, // 27: control.EvacuateShardResponse.signature:type_name -> control.Signature
+ 39, // 28: control.FlushCacheRequest.body:type_name -> control.FlushCacheRequest.Body
+ 51, // 29: control.FlushCacheRequest.signature:type_name -> control.Signature
+ 40, // 30: control.FlushCacheResponse.body:type_name -> control.FlushCacheResponse.Body
+ 51, // 31: control.FlushCacheResponse.signature:type_name -> control.Signature
+ 41, // 32: control.DoctorRequest.body:type_name -> control.DoctorRequest.Body
+ 51, // 33: control.DoctorRequest.signature:type_name -> control.Signature
+ 42, // 34: control.DoctorResponse.body:type_name -> control.DoctorResponse.Body
+ 51, // 35: control.DoctorResponse.signature:type_name -> control.Signature
+ 43, // 36: control.StartShardEvacuationRequest.body:type_name -> control.StartShardEvacuationRequest.Body
+ 51, // 37: control.StartShardEvacuationRequest.signature:type_name -> control.Signature
+ 44, // 38: control.StartShardEvacuationResponse.body:type_name -> control.StartShardEvacuationResponse.Body
+ 51, // 39: control.StartShardEvacuationResponse.signature:type_name -> control.Signature
+ 45, // 40: control.GetShardEvacuationStatusRequest.body:type_name -> control.GetShardEvacuationStatusRequest.Body
+ 51, // 41: control.GetShardEvacuationStatusRequest.signature:type_name -> control.Signature
+ 46, // 42: control.GetShardEvacuationStatusResponse.body:type_name -> control.GetShardEvacuationStatusResponse.Body
+ 51, // 43: control.GetShardEvacuationStatusResponse.signature:type_name -> control.Signature
+ 49, // 44: control.StopShardEvacuationRequest.body:type_name -> control.StopShardEvacuationRequest.Body
+ 51, // 45: control.StopShardEvacuationRequest.signature:type_name -> control.Signature
+ 50, // 46: control.StopShardEvacuationResponse.body:type_name -> control.StopShardEvacuationResponse.Body
+ 51, // 47: control.StopShardEvacuationResponse.signature:type_name -> control.Signature
+ 52, // 48: control.HealthCheckResponse.Body.netmap_status:type_name -> control.NetmapStatus
+ 53, // 49: control.HealthCheckResponse.Body.health_status:type_name -> control.HealthStatus
+ 52, // 50: control.SetNetmapStatusRequest.Body.status:type_name -> control.NetmapStatus
+ 54, // 51: control.ListShardsResponse.Body.shards:type_name -> control.ShardInfo
+ 55, // 52: control.SetShardModeRequest.Body.mode:type_name -> control.ShardMode
+ 0, // 53: control.GetShardEvacuationStatusResponse.Body.status:type_name -> control.GetShardEvacuationStatusResponse.Body.Status
+ 48, // 54: control.GetShardEvacuationStatusResponse.Body.duration:type_name -> control.GetShardEvacuationStatusResponse.Body.Duration
+ 47, // 55: control.GetShardEvacuationStatusResponse.Body.started_at:type_name -> control.GetShardEvacuationStatusResponse.Body.UnixTimestamp
+ 1, // 56: control.ControlService.HealthCheck:input_type -> control.HealthCheckRequest
+ 3, // 57: control.ControlService.SetNetmapStatus:input_type -> control.SetNetmapStatusRequest
+ 5, // 58: control.ControlService.DropObjects:input_type -> control.DropObjectsRequest
+ 7, // 59: control.ControlService.ListShards:input_type -> control.ListShardsRequest
+ 9, // 60: control.ControlService.SetShardMode:input_type -> control.SetShardModeRequest
+ 11, // 61: control.ControlService.SynchronizeTree:input_type -> control.SynchronizeTreeRequest
+ 13, // 62: control.ControlService.EvacuateShard:input_type -> control.EvacuateShardRequest
+ 19, // 63: control.ControlService.StartShardEvacuation:input_type -> control.StartShardEvacuationRequest
+ 21, // 64: control.ControlService.GetShardEvacuationStatus:input_type -> control.GetShardEvacuationStatusRequest
+ 23, // 65: control.ControlService.StopShardEvacuation:input_type -> control.StopShardEvacuationRequest
+ 15, // 66: control.ControlService.FlushCache:input_type -> control.FlushCacheRequest
+ 17, // 67: control.ControlService.Doctor:input_type -> control.DoctorRequest
+ 2, // 68: control.ControlService.HealthCheck:output_type -> control.HealthCheckResponse
+ 4, // 69: control.ControlService.SetNetmapStatus:output_type -> control.SetNetmapStatusResponse
+ 6, // 70: control.ControlService.DropObjects:output_type -> control.DropObjectsResponse
+ 8, // 71: control.ControlService.ListShards:output_type -> control.ListShardsResponse
+ 10, // 72: control.ControlService.SetShardMode:output_type -> control.SetShardModeResponse
+ 12, // 73: control.ControlService.SynchronizeTree:output_type -> control.SynchronizeTreeResponse
+ 14, // 74: control.ControlService.EvacuateShard:output_type -> control.EvacuateShardResponse
+ 20, // 75: control.ControlService.StartShardEvacuation:output_type -> control.StartShardEvacuationResponse
+ 22, // 76: control.ControlService.GetShardEvacuationStatus:output_type -> control.GetShardEvacuationStatusResponse
+ 24, // 77: control.ControlService.StopShardEvacuation:output_type -> control.StopShardEvacuationResponse
+ 16, // 78: control.ControlService.FlushCache:output_type -> control.FlushCacheResponse
+ 18, // 79: control.ControlService.Doctor:output_type -> control.DoctorResponse
+ 68, // [68:80] is the sub-list for method output_type
+ 56, // [56:68] is the sub-list for method input_type
+ 56, // [56:56] is the sub-list for extension type_name
+ 56, // [56:56] is the sub-list for extension extendee
+ 0, // [0:56] is the sub-list for field type_name
+}
+
+func init() { file_pkg_services_control_service_proto_init() }
+func file_pkg_services_control_service_proto_init() {
+ if File_pkg_services_control_service_proto != nil {
+ return
+ }
+ file_pkg_services_control_types_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_services_control_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetNetmapStatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetNetmapStatusResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DropObjectsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DropObjectsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListShardsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListShardsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetShardModeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetShardModeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SynchronizeTreeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SynchronizeTreeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvacuateShardRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvacuateShardResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FlushCacheRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FlushCacheResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoctorRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoctorResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StartShardEvacuationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StartShardEvacuationResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetShardEvacuationStatusRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetShardEvacuationStatusResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StopShardEvacuationRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StopShardEvacuationResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetNetmapStatusRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetNetmapStatusResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DropObjectsRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DropObjectsResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListShardsRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListShardsResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetShardModeRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SetShardModeResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SynchronizeTreeRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SynchronizeTreeResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvacuateShardRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvacuateShardResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FlushCacheRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FlushCacheResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoctorRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoctorResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StartShardEvacuationRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StartShardEvacuationResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetShardEvacuationStatusRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetShardEvacuationStatusResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetShardEvacuationStatusResponse_Body_UnixTimestamp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetShardEvacuationStatusResponse_Body_Duration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StopShardEvacuationRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StopShardEvacuationResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_services_control_service_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 50,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_pkg_services_control_service_proto_goTypes,
+ DependencyIndexes: file_pkg_services_control_service_proto_depIdxs,
+ EnumInfos: file_pkg_services_control_service_proto_enumTypes,
+ MessageInfos: file_pkg_services_control_service_proto_msgTypes,
+ }.Build()
+ File_pkg_services_control_service_proto = out.File
+ file_pkg_services_control_service_proto_rawDesc = nil
+ file_pkg_services_control_service_proto_goTypes = nil
+ file_pkg_services_control_service_proto_depIdxs = nil
+}
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 4c539acfc..a80deb2da 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -6,749 +6,399 @@ import "pkg/services/control/types.proto";
option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control";
-// `ControlService` provides an interface for internal work with the storage
-// node.
+// `ControlService` provides an interface for internal work with the storage node.
service ControlService {
- // Performs health check of the storage node.
- rpc HealthCheck(HealthCheckRequest) returns (HealthCheckResponse);
+ // Performs health check of the storage node.
+ rpc HealthCheck (HealthCheckRequest) returns (HealthCheckResponse);
- // Sets status of the storage node in FrostFS network map.
- rpc SetNetmapStatus(SetNetmapStatusRequest) returns (SetNetmapStatusResponse);
+ // Sets status of the storage node in FrostFS network map.
+ rpc SetNetmapStatus (SetNetmapStatusRequest) returns (SetNetmapStatusResponse);
- // Gets status of the storage node in FrostFS network map.
- rpc GetNetmapStatus(GetNetmapStatusRequest) returns (GetNetmapStatusResponse);
+ // Mark objects to be removed from node's local object storage.
+ rpc DropObjects (DropObjectsRequest) returns (DropObjectsResponse);
- // Mark objects to be removed from node's local object storage.
- rpc DropObjects(DropObjectsRequest) returns (DropObjectsResponse);
+ // Returns list that contains information about all shards of a node.
+ rpc ListShards (ListShardsRequest) returns (ListShardsResponse);
- // Returns list that contains information about all shards of a node.
- rpc ListShards(ListShardsRequest) returns (ListShardsResponse);
+ // Sets mode of the shard.
+ rpc SetShardMode (SetShardModeRequest) returns (SetShardModeResponse);
- // Sets mode of the shard.
- rpc SetShardMode(SetShardModeRequest) returns (SetShardModeResponse);
+ // Synchronizes all log operations for the specified tree.
+ rpc SynchronizeTree (SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
- // Synchronizes all log operations for the specified tree.
- rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
+ // EvacuateShard moves all data from one shard to the others.
+ // Deprecated: Use StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
+ rpc EvacuateShard (EvacuateShardRequest) returns (EvacuateShardResponse);
- // StartShardEvacuation starts moving all data from one shard to the others.
- rpc StartShardEvacuation(StartShardEvacuationRequest)
- returns (StartShardEvacuationResponse);
+ // StartShardEvacuation starts moving all data from one shard to the others.
+ rpc StartShardEvacuation (StartShardEvacuationRequest) returns (StartShardEvacuationResponse);
- // GetShardEvacuationStatus returns evacuation status.
- rpc GetShardEvacuationStatus(GetShardEvacuationStatusRequest)
- returns (GetShardEvacuationStatusResponse);
+ // GetShardEvacuationStatus returns evacuation status.
+ rpc GetShardEvacuationStatus (GetShardEvacuationStatusRequest) returns (GetShardEvacuationStatusResponse);
- // ResetShardEvacuationStatus resets evacuation status if there is no running
- // evacuation process.
- rpc ResetShardEvacuationStatus(ResetShardEvacuationStatusRequest)
- returns (ResetShardEvacuationStatusResponse);
+ // StopShardEvacuation stops moving all data from one shard to the others.
+ rpc StopShardEvacuation (StopShardEvacuationRequest) returns (StopShardEvacuationResponse);
- // StopShardEvacuation stops moving all data from one shard to the others.
- rpc StopShardEvacuation(StopShardEvacuationRequest)
- returns (StopShardEvacuationResponse);
+ // FlushCache moves all data from one shard to the others.
+ rpc FlushCache (FlushCacheRequest) returns (FlushCacheResponse);
- // FlushCache moves all data from one shard to the others.
- rpc FlushCache(FlushCacheRequest) returns (FlushCacheResponse);
-
- // Doctor performs storage restructuring operations on engine.
- rpc Doctor(DoctorRequest) returns (DoctorResponse);
-
- // Add local access policy engine overrides to a node.
- rpc AddChainLocalOverride(AddChainLocalOverrideRequest)
- returns (AddChainLocalOverrideResponse);
-
- // Get local access policy engine overrides stored in the node by chain id.
- rpc GetChainLocalOverride(GetChainLocalOverrideRequest)
- returns (GetChainLocalOverrideResponse);
-
- // List local access policy engine overrides stored in the node by container
- // id.
- rpc ListChainLocalOverrides(ListChainLocalOverridesRequest)
- returns (ListChainLocalOverridesResponse);
-
- // Remove local access policy engine overrides stored in the node by chaind
- // id.
- rpc RemoveChainLocalOverride(RemoveChainLocalOverrideRequest)
- returns (RemoveChainLocalOverrideResponse);
-
- // Remove local access policy engine overrides stored in the node by chaind
- // id.
- rpc RemoveChainLocalOverridesByTarget(
- RemoveChainLocalOverridesByTargetRequest)
- returns (RemoveChainLocalOverridesByTargetResponse);
-
- // List targets of the local APE overrides stored in the node.
- rpc ListTargetsLocalOverrides(ListTargetsLocalOverridesRequest)
- returns (ListTargetsLocalOverridesResponse);
-
- // Flush objects from write-cache and move it to degraded read only mode.
- rpc SealWriteCache(SealWriteCacheRequest) returns (SealWriteCacheResponse);
-
- // DetachShards detaches and closes shards.
- rpc DetachShards(DetachShardsRequest) returns (DetachShardsResponse);
-
- // StartShardRebuild starts shard rebuild process.
- rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse);
-
- // ListShardsForObject returns shard info where object is stored.
- rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse);
+ // Doctor performs storage restructuring operations on engine.
+ rpc Doctor (DoctorRequest) returns (DoctorResponse);
}
// Health check request.
message HealthCheckRequest {
- // Health check request body.
- message Body {}
+ // Health check request body.
+ message Body {
+ }
- // Body of health check request message.
- Body body = 1;
+ // Body of health check request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Health check request.
message HealthCheckResponse {
- // Health check response body
- message Body {
- // Status of the storage node in FrostFS network map.
- NetmapStatus netmap_status = 1;
+ // Health check response body
+ message Body {
+ // Status of the storage node in FrostFS network map.
+ NetmapStatus netmap_status = 1;
- // Health status of storage node application.
- HealthStatus health_status = 2;
- }
+ // Health status of storage node application.
+ HealthStatus health_status = 2;
+ }
- // Body of health check response message.
- Body body = 1;
+ // Body of health check response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Set netmap status request.
message SetNetmapStatusRequest {
- // Set netmap status request body.
- message Body {
- // New storage node status in FrostFS network map.
- // If status is MAINTENANCE, the node checks whether maintenance is
- // allowed in the network settings. In case of prohibition, the request
- // is denied. Otherwise, node switches to local maintenance state. To
- // force local maintenance, use `force_maintenance` flag.
- NetmapStatus status = 1;
+ // Set netmap status request body.
+ message Body {
+ // New storage node status in FrostFS network map.
+ // If status is MAINTENANCE, the node checks whether maintenance is
+ // allowed in the network settings. In case of prohibition, the request
+ // is denied. Otherwise, node switches to local maintenance state. To
+ // force local maintenance, use `force_maintenance` flag.
+ NetmapStatus status = 1;
- // MAINTENANCE status validation skip flag. If set, node starts local
- // maintenance regardless of network settings. The flag MUST NOT be
- // set for any other status.
- bool force_maintenance = 2;
- }
+ // MAINTENANCE status validation skip flag. If set, node starts local
+ // maintenance regardless of network settings. The flag MUST NOT be
+ // set for any other status.
+ bool force_maintenance = 2;
+ }
- // Body of set netmap status request message.
- Body body = 1;
+ // Body of set netmap status request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Set netmap status response.
message SetNetmapStatusResponse {
- // Set netmap status response body
- message Body {}
+ // Set netmap status response body
+ message Body {
+ }
- // Body of set netmap status response message.
- Body body = 1;
+ // Body of set netmap status response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
-}
-
-// Get netmap status request.
-message GetNetmapStatusRequest {
- message Body {}
-
- // Body of set netmap status request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// Get netmap status response.
-message GetNetmapStatusResponse {
- message Body {
- // Storage node status in FrostFS network map.
- NetmapStatus status = 1;
-
- // Network map epoch.
- uint64 epoch = 2;
- }
-
- // Body of get netmap status response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Request to drop the objects.
message DropObjectsRequest {
- // Request body structure.
- message Body {
- // List of object addresses to be removed.
- // in FrostFS API binary format.
- repeated bytes address_list = 1;
- }
+ // Request body structure.
+ message Body {
+ // List of object addresses to be removed.
+ // in FrostFS API binary format.
+ repeated bytes address_list = 1;
+ }
- // Body of the request message.
- Body body = 1;
+ // Body of the request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Response to request to drop the objects.
message DropObjectsResponse {
- // Response body structure.
- message Body {}
+ // Response body structure.
+ message Body {
+ }
- // Body of the response message.
- Body body = 1;
+ // Body of the response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Request to list all shards of the node.
message ListShardsRequest {
- // Request body structure.
- message Body {}
+ // Request body structure.
+ message Body {
+ }
- // Body of the request message.
- Body body = 1;
+ // Body of the request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// ListShards response.
message ListShardsResponse {
- // Response body structure.
- message Body {
- // List of the node's shards.
- repeated ShardInfo shards = 1;
- }
+ // Response body structure.
+ message Body {
+ // List of the node's shards.
+ repeated ShardInfo shards = 1;
+ }
- // Body of the response message.
- Body body = 1;
+ // Body of the response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// Request to set mode of the shard.
message SetShardModeRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- repeated bytes shard_ID = 1;
+ // Request body structure.
+ message Body {
+ // ID of the shard.
+ repeated bytes shard_ID = 1;
- // Mode that requested to be set.
- ShardMode mode = 2;
+ // Mode that requested to be set.
+ ShardMode mode = 2;
- // Flag signifying whether error counter should be set to 0.
- bool resetErrorCounter = 3;
- }
+ // Flag signifying whether error counter should be set to 0.
+ bool resetErrorCounter = 3;
+ }
- // Body of set shard mode request message.
- Body body = 1;
+ // Body of set shard mode request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// SetShardMode response.
message SetShardModeResponse {
- // Response body structure.
- message Body {}
+ // Response body structure.
+ message Body {
+ }
- // Body of set shard mode response message.
- Body body = 1;
+ // Body of set shard mode response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// SynchronizeTree request.
message SynchronizeTreeRequest {
- // Request body structure.
- message Body {
- bytes container_id = 1;
- string tree_id = 2;
- // Starting height for the synchronization. Can be omitted.
- uint64 height = 3;
- }
+ // Request body structure.
+ message Body {
+ bytes container_id = 1;
+ string tree_id = 2;
+ // Starting height for the synchronization. Can be omitted.
+ uint64 height = 3;
+ }
- // Body of restore shard request message.
- Body body = 1;
+ // Body of restore shard request message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
// SynchronizeTree response.
message SynchronizeTreeResponse {
- // Response body structure.
- message Body {}
+ // Response body structure.
+ message Body {
+ }
- // Body of restore shard response message.
- Body body = 1;
+ // Body of restore shard response message.
+ Body body = 1;
- // Body signature.
- Signature signature = 2;
+ // Body signature.
+ Signature signature = 2;
}
+
// EvacuateShard request.
message EvacuateShardRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- repeated bytes shard_ID = 1;
+ // Request body structure.
+ message Body {
+ // ID of the shard.
+ repeated bytes shard_ID = 1;
- // Flag indicating whether object read errors should be ignored.
- bool ignore_errors = 2;
- }
+ // Flag indicating whether object read errors should be ignored.
+ bool ignore_errors = 2;
+ }
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// EvacuateShard response.
message EvacuateShardResponse {
- // Response body structure.
- message Body { uint32 count = 1; }
+ // Response body structure.
+ message Body {
+ uint32 count = 1;
+ }
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// FlushCache request.
message FlushCacheRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- repeated bytes shard_ID = 1;
- // If true, then writecache will be left in read-only mode after flush
- // completed.
- bool seal = 2;
- }
+ // Request body structure.
+ message Body {
+ // ID of the shard.
+ repeated bytes shard_ID = 1;
+ }
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// FlushCache response.
message FlushCacheResponse {
- // Response body structure.
- message Body {}
+ // Response body structure.
+ message Body {
+ }
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
+
// Doctor request.
message DoctorRequest {
- // Request body structure.
- message Body {
- // Number of threads to use for the operation.
- uint32 concurrency = 1;
- // Flag to search engine for duplicate objects and leave only one copy.
- bool remove_duplicates = 2;
- }
+ // Request body structure.
+ message Body {
+ // Number of threads to use for the operation.
+ uint32 concurrency = 1;
+ // Flag to search engine for duplicate objects and leave only one copy.
+ bool remove_duplicates = 2;
+ }
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// Doctor response.
message DoctorResponse {
- // Response body structure.
- message Body {}
+ // Response body structure.
+ message Body {
+ }
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// StartShardEvacuation request.
message StartShardEvacuationRequest {
- // Request body structure.
- message Body {
- enum Scope {
- NONE = 0;
- OBJECTS = 1;
- TREES = 2;
+ // Request body structure.
+ message Body {
+ // IDs of the shards.
+ repeated bytes shard_ID = 1;
+ // Flag indicating whether object read errors should be ignored.
+ bool ignore_errors = 2;
}
- // IDs of the shards.
- repeated bytes shard_ID = 1;
- // Flag indicating whether object read errors should be ignored.
- bool ignore_errors = 2;
- // Evacuation scope.
- uint32 scope = 3;
- // Count of concurrent container evacuation workers.
- uint32 container_worker_count = 4;
- // Count of concurrent object evacuation workers.
- uint32 object_worker_count = 5;
- // Choose for evacuation objects in `REP 1` containers only.
- bool rep_one_only = 6;
- }
-
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// StartShardEvacuation response.
message StartShardEvacuationResponse {
- // Response body structure.
- message Body {}
+ // Response body structure.
+ message Body {}
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// GetShardEvacuationStatus request.
message GetShardEvacuationStatusRequest {
- // Request body structure.
- message Body {}
+ // Request body structure.
+ message Body {}
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// GetShardEvacuationStatus response.
message GetShardEvacuationStatusResponse {
- // Response body structure.
- message Body {
- // Evacuate status enum.
- enum Status {
- EVACUATE_SHARD_STATUS_UNDEFINED = 0;
- RUNNING = 1;
- COMPLETED = 2;
+ // Response body structure.
+ message Body {
+ // Evacuate status enum.
+ enum Status {
+ EVACUATE_SHARD_STATUS_UNDEFINED = 0;
+ RUNNING = 1;
+ COMPLETED = 2;
+ }
+
+ // Unix timestamp value.
+ message UnixTimestamp {
+ int64 value = 1;
+ }
+
+ // Duration in seconds.
+ message Duration {
+ int64 seconds = 1;
+ }
+
+ // Total objects to evacuate count. The value is approximate, so evacuated + failed == total is not guaranteed after completion.
+ uint64 total = 1;
+ // Evacuated objects count.
+ uint64 evacuated = 2;
+ // Failed objects count.
+ uint64 failed = 3;
+
+ // Shard IDs.
+ repeated bytes shard_ID = 4;
+ // Evacuation process status.
+ Status status = 5;
+ // Evacuation process duration.
+ Duration duration = 6;
+ // Evacuation process started at timestamp.
+ UnixTimestamp started_at = 7;
+ // Error message if evacuation failed.
+ string error_message = 8;
}
- // Unix timestamp value.
- message UnixTimestamp { int64 value = 1; }
-
- // Duration in seconds.
- message Duration { int64 seconds = 1; }
-
- // Total objects to evacuate count. The value is approximate, so evacuated +
- // failed + skipped == total is not guaranteed after completion.
- uint64 total_objects = 1;
- // Evacuated objects count.
- uint64 evacuated_objects = 2;
- // Failed objects count.
- uint64 failed_objects = 3;
-
- // Shard IDs.
- repeated bytes shard_ID = 4;
- // Evacuation process status.
- Status status = 5;
- // Evacuation process duration.
- Duration duration = 6;
- // Evacuation process started at timestamp.
- UnixTimestamp started_at = 7;
- // Error message if evacuation failed.
- string error_message = 8;
-
- // Skipped objects count.
- uint64 skipped_objects = 9;
-
- // Total trees to evacuate count.
- uint64 total_trees = 10;
- // Evacuated trees count.
- uint64 evacuated_trees = 11;
- // Failed trees count.
- uint64 failed_trees = 12;
- }
-
- Body body = 1;
- Signature signature = 2;
-}
-
-// ResetShardEvacuationStatus request.
-message ResetShardEvacuationStatusRequest {
- message Body {}
-
- Body body = 1;
- Signature signature = 2;
-}
-
-// ResetShardEvacuationStatus response.
-message ResetShardEvacuationStatusResponse {
- message Body {}
-
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// StopShardEvacuation request.
message StopShardEvacuationRequest {
- // Request body structure.
- message Body {}
+ // Request body structure.
+ message Body {}
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
// StopShardEvacuation response.
message StopShardEvacuationResponse {
- // Response body structure.
- message Body {}
+ // Response body structure.
+ message Body {}
- Body body = 1;
- Signature signature = 2;
-}
-
-// AddChainLocalOverride request.
-message AddChainLocalOverrideRequest {
- message Body {
- // Target for which the overrides are applied.
- ChainTarget target = 1;
-
- // Serialized rule chain. If chain ID is left empty
- // in the chain, then it will be generated and returned
- // in the response.
- bytes chain = 2;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-// AddChainLocalOverride response.
-message AddChainLocalOverrideResponse {
- message Body {
- // Chain ID assigned for the added rule chain.
- // If chain ID is left empty in the request, then
- // it will be generated.
- bytes chain_id = 1;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-// GetChainLocalOverride request.
-message GetChainLocalOverrideRequest {
- message Body {
- // Target for which the overrides are applied.
- ChainTarget target = 1;
-
- // Chain ID assigned for the added rule chain.
- bytes chain_id = 2;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-// GetChainLocalOverride response.
-message GetChainLocalOverrideResponse {
- message Body {
- // Serialized rule chain.
- bytes chain = 1;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-// ListChainLocalOverrides request.
-message ListChainLocalOverridesRequest {
- message Body {
- // Target for which the overrides are applied.
- ChainTarget target = 1;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-// ListChainLocalOverrides response.
-message ListChainLocalOverridesResponse {
- message Body {
- // The list of serialized rule chain.
- repeated bytes chains = 1;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-// ListTargetsLocalOverrides request.
-message ListTargetsLocalOverridesRequest {
- message Body {
- // Target for which the overrides are applied.
- string chainName = 1;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-// ListTargetsLocalOverrides response.
-message ListTargetsLocalOverridesResponse {
- message Body {
- // The list of chain targets.
- repeated ChainTarget targets = 1;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-message RemoveChainLocalOverrideRequest {
- message Body {
- // Target for which the overrides are applied.
- ChainTarget target = 1;
-
- // Chain ID assigned for the added rule chain.
- bytes chain_id = 2;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-message RemoveChainLocalOverrideResponse {
- message Body {}
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-message RemoveChainLocalOverridesByTargetRequest {
- message Body {
- // Target for which the overrides are applied.
- ChainTarget target = 1;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-message RemoveChainLocalOverridesByTargetResponse {
- message Body {}
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-message SealWriteCacheRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- repeated bytes shard_ID = 1;
-
- // Flag indicating whether object read errors should be ignored.
- bool ignore_errors = 2;
-
- // Flag indicating whether writecache will be sealed async.
- bool async = 3;
-
- // If true, then writecache will be sealed, but mode will be restored to the current one.
- bool restore_mode = 4;
-
- // If true, then writecache will shrink internal storage.
- bool shrink = 5;
- }
-
- Body body = 1;
- Signature signature = 2;
-}
-
-message SealWriteCacheResponse {
- message Body {
- message Status {
- bytes shard_ID = 1;
- bool success = 2;
- string error = 3;
- }
- repeated Status results = 1;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-message DetachShardsRequest {
- message Body { repeated bytes shard_ID = 1; }
-
- Body body = 1;
- Signature signature = 2;
-}
-
-message DetachShardsResponse {
- message Body {}
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-message StartShardRebuildRequest {
- message Body {
- repeated bytes shard_ID = 1;
- uint32 target_fill_percent = 2;
- uint32 concurrency_limit = 3;
- }
-
- Body body = 1;
- Signature signature = 2;
-}
-
-message StartShardRebuildResponse {
- message Body {
- message Status {
- bytes shard_ID = 1;
- bool success = 2;
- string error = 3;
- }
- repeated Status results = 1;
- }
-
- Body body = 1;
-
- Signature signature = 2;
-}
-
-message ListShardsForObjectRequest {
- message Body {
- string object_id = 1;
- string container_id = 2;
- }
-
- Body body = 1;
- Signature signature = 2;
-}
-
-message ListShardsForObjectResponse {
- message Body {
- // List of the node's shards storing object.
- repeated bytes shard_ID = 1;
- }
-
- Body body = 1;
- Signature signature = 2;
+ Body body = 1;
+ Signature signature = 2;
}
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 44849d591..cc96a98df 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -2,27 +2,7 @@
package control
-import (
- json "encoding/json"
- fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
- easyproto "github.com/VictoriaMetrics/easyproto"
- jlexer "github.com/mailru/easyjson/jlexer"
- jwriter "github.com/mailru/easyjson/jwriter"
- strconv "strconv"
-)
-
-type HealthCheckRequest_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthCheckRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthCheckRequest_Body)(nil)
- _ json.Marshaler = (*HealthCheckRequest_Body)(nil)
- _ json.Unmarshaler = (*HealthCheckRequest_Body)(nil)
-)
+import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
// StableSize returns the size of x in protobuf format.
//
@@ -34,93 +14,18 @@ func (x *HealthCheckRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthCheckRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthCheckRequest_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *HealthCheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthCheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthCheckRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthCheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthCheckRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthCheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type HealthCheckRequest struct {
- Body *HealthCheckRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthCheckRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthCheckRequest)(nil)
- _ json.Marshaler = (*HealthCheckRequest)(nil)
- _ json.Unmarshaler = (*HealthCheckRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -133,6 +38,27 @@ func (x *HealthCheckRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthCheckRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -148,176 +74,13 @@ func (x *HealthCheckRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthCheckRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *HealthCheckRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthCheckRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(HealthCheckRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
- x.Body = v
-}
-func (x *HealthCheckRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *HealthCheckRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthCheckRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthCheckRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthCheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *HealthCheckRequest_Body
- f = new(HealthCheckRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type HealthCheckResponse_Body struct {
- NetmapStatus NetmapStatus `json:"netmapStatus"`
- HealthStatus HealthStatus `json:"healthStatus"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthCheckResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthCheckResponse_Body)(nil)
- _ json.Marshaler = (*HealthCheckResponse_Body)(nil)
- _ json.Unmarshaler = (*HealthCheckResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -330,207 +93,27 @@ func (x *HealthCheckResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthCheckResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *HealthCheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthCheckResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if int32(x.NetmapStatus) != 0 {
- mm.AppendInt32(1, int32(x.NetmapStatus))
- }
- if int32(x.HealthStatus) != 0 {
- mm.AppendInt32(2, int32(x.HealthStatus))
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.EnumMarshal(1, buf[offset:], int32(x.NetmapStatus))
+ offset += proto.EnumMarshal(2, buf[offset:], int32(x.HealthStatus))
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthCheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // NetmapStatus
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "NetmapStatus")
- }
- x.NetmapStatus = NetmapStatus(data)
- case 2: // HealthStatus
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "HealthStatus")
- }
- x.HealthStatus = HealthStatus(data)
- }
- }
- return nil
-}
-func (x *HealthCheckResponse_Body) GetNetmapStatus() NetmapStatus {
- if x != nil {
- return x.NetmapStatus
- }
- return 0
-}
-func (x *HealthCheckResponse_Body) SetNetmapStatus(v NetmapStatus) {
- x.NetmapStatus = v
-}
-func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
- if x != nil {
- return x.HealthStatus
- }
- return 0
-}
-func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
- x.HealthStatus = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthCheckResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"netmapStatus\":"
- out.RawString(prefix)
- v := int32(x.NetmapStatus)
- if vv, ok := NetmapStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"healthStatus\":"
- out.RawString(prefix)
- v := int32(x.HealthStatus)
- if vv, ok := HealthStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthCheckResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthCheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "netmapStatus":
- {
- var f NetmapStatus
- var parsedValue NetmapStatus
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := NetmapStatus_value[v]; ok {
- parsedValue = NetmapStatus(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = NetmapStatus(vv)
- case float64:
- parsedValue = NetmapStatus(v)
- }
- f = parsedValue
- x.NetmapStatus = f
- }
- case "healthStatus":
- {
- var f HealthStatus
- var parsedValue HealthStatus
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := HealthStatus_value[v]; ok {
- parsedValue = HealthStatus(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = HealthStatus(vv)
- case float64:
- parsedValue = HealthStatus(v)
- }
- f = parsedValue
- x.HealthStatus = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type HealthCheckResponse struct {
- Body *HealthCheckResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthCheckResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthCheckResponse)(nil)
- _ json.Marshaler = (*HealthCheckResponse)(nil)
- _ json.Unmarshaler = (*HealthCheckResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -543,6 +126,27 @@ func (x *HealthCheckResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthCheckResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -558,176 +162,13 @@ func (x *HealthCheckResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthCheckResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *HealthCheckResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthCheckResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(HealthCheckResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
- x.Body = v
-}
-func (x *HealthCheckResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *HealthCheckResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthCheckResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthCheckResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthCheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *HealthCheckResponse_Body
- f = new(HealthCheckResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SetNetmapStatusRequest_Body struct {
- Status NetmapStatus `json:"status"`
- ForceMaintenance bool `json:"forceMaintenance"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SetNetmapStatusRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*SetNetmapStatusRequest_Body)(nil)
- _ json.Marshaler = (*SetNetmapStatusRequest_Body)(nil)
- _ json.Unmarshaler = (*SetNetmapStatusRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -740,186 +181,27 @@ func (x *SetNetmapStatusRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SetNetmapStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *SetNetmapStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SetNetmapStatusRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if int32(x.Status) != 0 {
- mm.AppendInt32(1, int32(x.Status))
- }
- if x.ForceMaintenance {
- mm.AppendBool(2, x.ForceMaintenance)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.EnumMarshal(1, buf[offset:], int32(x.Status))
+ offset += proto.BoolMarshal(2, buf[offset:], x.ForceMaintenance)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SetNetmapStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Status
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Status")
- }
- x.Status = NetmapStatus(data)
- case 2: // ForceMaintenance
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ForceMaintenance")
- }
- x.ForceMaintenance = data
- }
- }
- return nil
-}
-func (x *SetNetmapStatusRequest_Body) GetStatus() NetmapStatus {
- if x != nil {
- return x.Status
- }
- return 0
-}
-func (x *SetNetmapStatusRequest_Body) SetStatus(v NetmapStatus) {
- x.Status = v
-}
-func (x *SetNetmapStatusRequest_Body) GetForceMaintenance() bool {
- if x != nil {
- return x.ForceMaintenance
- }
- return false
-}
-func (x *SetNetmapStatusRequest_Body) SetForceMaintenance(v bool) {
- x.ForceMaintenance = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SetNetmapStatusRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"status\":"
- out.RawString(prefix)
- v := int32(x.Status)
- if vv, ok := NetmapStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"forceMaintenance\":"
- out.RawString(prefix)
- out.Bool(x.ForceMaintenance)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SetNetmapStatusRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SetNetmapStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "status":
- {
- var f NetmapStatus
- var parsedValue NetmapStatus
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := NetmapStatus_value[v]; ok {
- parsedValue = NetmapStatus(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = NetmapStatus(vv)
- case float64:
- parsedValue = NetmapStatus(v)
- }
- f = parsedValue
- x.Status = f
- }
- case "forceMaintenance":
- {
- var f bool
- f = in.Bool()
- x.ForceMaintenance = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SetNetmapStatusRequest struct {
- Body *SetNetmapStatusRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SetNetmapStatusRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*SetNetmapStatusRequest)(nil)
- _ json.Marshaler = (*SetNetmapStatusRequest)(nil)
- _ json.Unmarshaler = (*SetNetmapStatusRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -932,6 +214,27 @@ func (x *SetNetmapStatusRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SetNetmapStatusRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -947,174 +250,13 @@ func (x *SetNetmapStatusRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SetNetmapStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SetNetmapStatusRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *SetNetmapStatusRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *SetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SetNetmapStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(SetNetmapStatusRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *SetNetmapStatusRequest) GetBody() *SetNetmapStatusRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *SetNetmapStatusRequest) SetBody(v *SetNetmapStatusRequest_Body) {
- x.Body = v
-}
-func (x *SetNetmapStatusRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *SetNetmapStatusRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SetNetmapStatusRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SetNetmapStatusRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SetNetmapStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *SetNetmapStatusRequest_Body
- f = new(SetNetmapStatusRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SetNetmapStatusResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SetNetmapStatusResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*SetNetmapStatusResponse_Body)(nil)
- _ json.Marshaler = (*SetNetmapStatusResponse_Body)(nil)
- _ json.Unmarshaler = (*SetNetmapStatusResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1125,93 +267,18 @@ func (x *SetNetmapStatusResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SetNetmapStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SetNetmapStatusResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *SetNetmapStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SetNetmapStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SetNetmapStatusResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SetNetmapStatusResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SetNetmapStatusResponse struct {
- Body *SetNetmapStatusResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SetNetmapStatusResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*SetNetmapStatusResponse)(nil)
- _ json.Marshaler = (*SetNetmapStatusResponse)(nil)
- _ json.Unmarshaler = (*SetNetmapStatusResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1224,6 +291,27 @@ func (x *SetNetmapStatusResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SetNetmapStatusResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1239,866 +327,13 @@ func (x *SetNetmapStatusResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SetNetmapStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SetNetmapStatusResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *SetNetmapStatusResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *SetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SetNetmapStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(SetNetmapStatusResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *SetNetmapStatusResponse) GetBody() *SetNetmapStatusResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *SetNetmapStatusResponse) SetBody(v *SetNetmapStatusResponse_Body) {
- x.Body = v
-}
-func (x *SetNetmapStatusResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *SetNetmapStatusResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SetNetmapStatusResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SetNetmapStatusResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SetNetmapStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *SetNetmapStatusResponse_Body
- f = new(SetNetmapStatusResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetNetmapStatusRequest_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetNetmapStatusRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetNetmapStatusRequest_Body)(nil)
- _ json.Marshaler = (*GetNetmapStatusRequest_Body)(nil)
- _ json.Unmarshaler = (*GetNetmapStatusRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *GetNetmapStatusRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetNetmapStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetNetmapStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetNetmapStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusRequest_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetNetmapStatusRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetNetmapStatusRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetNetmapStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetNetmapStatusRequest struct {
- Body *GetNetmapStatusRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetNetmapStatusRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*GetNetmapStatusRequest)(nil)
- _ json.Marshaler = (*GetNetmapStatusRequest)(nil)
- _ json.Unmarshaler = (*GetNetmapStatusRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *GetNetmapStatusRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *GetNetmapStatusRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *GetNetmapStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetNetmapStatusRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetNetmapStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetNetmapStatusRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetNetmapStatusRequest) GetBody() *GetNetmapStatusRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetNetmapStatusRequest) SetBody(v *GetNetmapStatusRequest_Body) {
- x.Body = v
-}
-func (x *GetNetmapStatusRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetNetmapStatusRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetNetmapStatusRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetNetmapStatusRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetNetmapStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetNetmapStatusRequest_Body
- f = new(GetNetmapStatusRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetNetmapStatusResponse_Body struct {
- Status NetmapStatus `json:"status"`
- Epoch uint64 `json:"epoch"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetNetmapStatusResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetNetmapStatusResponse_Body)(nil)
- _ json.Marshaler = (*GetNetmapStatusResponse_Body)(nil)
- _ json.Unmarshaler = (*GetNetmapStatusResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *GetNetmapStatusResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.EnumSize(1, int32(x.Status))
- size += proto.UInt64Size(2, x.Epoch)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetNetmapStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetNetmapStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if int32(x.Status) != 0 {
- mm.AppendInt32(1, int32(x.Status))
- }
- if x.Epoch != 0 {
- mm.AppendUint64(2, x.Epoch)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetNetmapStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Status
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Status")
- }
- x.Status = NetmapStatus(data)
- case 2: // Epoch
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Epoch")
- }
- x.Epoch = data
- }
- }
- return nil
-}
-func (x *GetNetmapStatusResponse_Body) GetStatus() NetmapStatus {
- if x != nil {
- return x.Status
- }
- return 0
-}
-func (x *GetNetmapStatusResponse_Body) SetStatus(v NetmapStatus) {
- x.Status = v
-}
-func (x *GetNetmapStatusResponse_Body) GetEpoch() uint64 {
- if x != nil {
- return x.Epoch
- }
- return 0
-}
-func (x *GetNetmapStatusResponse_Body) SetEpoch(v uint64) {
- x.Epoch = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetNetmapStatusResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"status\":"
- out.RawString(prefix)
- v := int32(x.Status)
- if vv, ok := NetmapStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"epoch\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10)
- out.RawByte('"')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetNetmapStatusResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "status":
- {
- var f NetmapStatus
- var parsedValue NetmapStatus
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := NetmapStatus_value[v]; ok {
- parsedValue = NetmapStatus(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = NetmapStatus(vv)
- case float64:
- parsedValue = NetmapStatus(v)
- }
- f = parsedValue
- x.Status = f
- }
- case "epoch":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.Epoch = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetNetmapStatusResponse struct {
- Body *GetNetmapStatusResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetNetmapStatusResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*GetNetmapStatusResponse)(nil)
- _ json.Marshaler = (*GetNetmapStatusResponse)(nil)
- _ json.Unmarshaler = (*GetNetmapStatusResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *GetNetmapStatusResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *GetNetmapStatusResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *GetNetmapStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetNetmapStatusResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetNetmapStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetNetmapStatusResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetNetmapStatusResponse) GetBody() *GetNetmapStatusResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetNetmapStatusResponse) SetBody(v *GetNetmapStatusResponse_Body) {
- x.Body = v
-}
-func (x *GetNetmapStatusResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetNetmapStatusResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetNetmapStatusResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetNetmapStatusResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetNetmapStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetNetmapStatusResponse_Body
- f = new(GetNetmapStatusResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DropObjectsRequest_Body struct {
- AddressList [][]byte `json:"addressList"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DropObjectsRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*DropObjectsRequest_Body)(nil)
- _ json.Marshaler = (*DropObjectsRequest_Body)(nil)
- _ json.Unmarshaler = (*DropObjectsRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2110,155 +345,26 @@ func (x *DropObjectsRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DropObjectsRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *DropObjectsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DropObjectsRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- for j := range x.AddressList {
- mm.AppendBytes(1, x.AddressList[j])
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.AddressList)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DropObjectsRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DropObjectsRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // AddressList
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "AddressList")
- }
- x.AddressList = append(x.AddressList, data)
- }
- }
- return nil
-}
-func (x *DropObjectsRequest_Body) GetAddressList() [][]byte {
- if x != nil {
- return x.AddressList
- }
- return nil
-}
-func (x *DropObjectsRequest_Body) SetAddressList(v [][]byte) {
- x.AddressList = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DropObjectsRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DropObjectsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"addressList\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.AddressList {
- if i != 0 {
- out.RawByte(',')
- }
- if x.AddressList[i] != nil {
- out.Base64Bytes(x.AddressList[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DropObjectsRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DropObjectsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "addressList":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.AddressList = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DropObjectsRequest struct {
- Body *DropObjectsRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DropObjectsRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*DropObjectsRequest)(nil)
- _ json.Marshaler = (*DropObjectsRequest)(nil)
- _ json.Unmarshaler = (*DropObjectsRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2271,6 +377,27 @@ func (x *DropObjectsRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DropObjectsRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2286,174 +413,13 @@ func (x *DropObjectsRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DropObjectsRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DropObjectsRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *DropObjectsRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *DropObjectsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DropObjectsRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DropObjectsRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(DropObjectsRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *DropObjectsRequest) GetBody() *DropObjectsRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *DropObjectsRequest) SetBody(v *DropObjectsRequest_Body) {
- x.Body = v
-}
-func (x *DropObjectsRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *DropObjectsRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DropObjectsRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DropObjectsRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DropObjectsRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DropObjectsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *DropObjectsRequest_Body
- f = new(DropObjectsRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DropObjectsResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DropObjectsResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*DropObjectsResponse_Body)(nil)
- _ json.Marshaler = (*DropObjectsResponse_Body)(nil)
- _ json.Unmarshaler = (*DropObjectsResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2464,93 +430,18 @@ func (x *DropObjectsResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DropObjectsResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DropObjectsResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *DropObjectsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DropObjectsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DropObjectsResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DropObjectsResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DropObjectsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DropObjectsResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DropObjectsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DropObjectsResponse struct {
- Body *DropObjectsResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DropObjectsResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*DropObjectsResponse)(nil)
- _ json.Marshaler = (*DropObjectsResponse)(nil)
- _ json.Unmarshaler = (*DropObjectsResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2563,6 +454,27 @@ func (x *DropObjectsResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DropObjectsResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2578,174 +490,13 @@ func (x *DropObjectsResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DropObjectsResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DropObjectsResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *DropObjectsResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *DropObjectsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DropObjectsResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DropObjectsResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(DropObjectsResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *DropObjectsResponse) GetBody() *DropObjectsResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *DropObjectsResponse) SetBody(v *DropObjectsResponse_Body) {
- x.Body = v
-}
-func (x *DropObjectsResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *DropObjectsResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DropObjectsResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DropObjectsResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DropObjectsResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DropObjectsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *DropObjectsResponse_Body
- f = new(DropObjectsResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsRequest_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsRequest_Body)(nil)
- _ json.Marshaler = (*ListShardsRequest_Body)(nil)
- _ json.Unmarshaler = (*ListShardsRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2756,93 +507,18 @@ func (x *ListShardsRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *ListShardsRequest_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *ListShardsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsRequest_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsRequest struct {
- Body *ListShardsRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsRequest)(nil)
- _ json.Marshaler = (*ListShardsRequest)(nil)
- _ json.Unmarshaler = (*ListShardsRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2855,6 +531,27 @@ func (x *ListShardsRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *ListShardsRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2870,175 +567,13 @@ func (x *ListShardsRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ListShardsRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *ListShardsRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *ListShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListShardsRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListShardsRequest) GetBody() *ListShardsRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListShardsRequest) SetBody(v *ListShardsRequest_Body) {
- x.Body = v
-}
-func (x *ListShardsRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListShardsRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListShardsRequest_Body
- f = new(ListShardsRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsResponse_Body struct {
- Shards []ShardInfo `json:"shards"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsResponse_Body)(nil)
- _ json.Marshaler = (*ListShardsResponse_Body)(nil)
- _ json.Unmarshaler = (*ListShardsResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3047,155 +582,33 @@ func (x *ListShardsResponse_Body) StableSize() (size int) {
return 0
}
for i := range x.Shards {
- size += proto.NestedStructureSizeUnchecked(1, &x.Shards[i])
+ size += proto.NestedStructureSize(1, x.Shards[i])
}
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *ListShardsResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
for i := range x.Shards {
- x.Shards[i].EmitProtobuf(mm.AppendMessage(1))
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Shards[i])
}
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Shards
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shards")
- }
- x.Shards = append(x.Shards, ShardInfo{})
- ff := &x.Shards[len(x.Shards)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListShardsResponse_Body) GetShards() []ShardInfo {
- if x != nil {
- return x.Shards
- }
- return nil
-}
-func (x *ListShardsResponse_Body) SetShards(v []ShardInfo) {
- x.Shards = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shards\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shards {
- if i != 0 {
- out.RawByte(',')
- }
- x.Shards[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shards":
- {
- var f ShardInfo
- var list []ShardInfo
- in.Delim('[')
- for !in.IsDelim(']') {
- f = ShardInfo{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Shards = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsResponse struct {
- Body *ListShardsResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsResponse)(nil)
- _ json.Marshaler = (*ListShardsResponse)(nil)
- _ json.Unmarshaler = (*ListShardsResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3208,6 +621,27 @@ func (x *ListShardsResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *ListShardsResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3223,177 +657,13 @@ func (x *ListShardsResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ListShardsResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *ListShardsResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *ListShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListShardsResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListShardsResponse) GetBody() *ListShardsResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListShardsResponse) SetBody(v *ListShardsResponse_Body) {
- x.Body = v
-}
-func (x *ListShardsResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListShardsResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListShardsResponse_Body
- f = new(ListShardsResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SetShardModeRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
- Mode ShardMode `json:"mode"`
- ResetErrorCounter bool `json:"resetErrorCounter"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SetShardModeRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*SetShardModeRequest_Body)(nil)
- _ json.Marshaler = (*SetShardModeRequest_Body)(nil)
- _ json.Unmarshaler = (*SetShardModeRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3407,244 +677,28 @@ func (x *SetShardModeRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SetShardModeRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *SetShardModeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SetShardModeRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- for j := range x.Shard_ID {
- mm.AppendBytes(1, x.Shard_ID[j])
- }
- if int32(x.Mode) != 0 {
- mm.AppendInt32(2, int32(x.Mode))
- }
- if x.ResetErrorCounter {
- mm.AppendBool(3, x.ResetErrorCounter)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
+ offset += proto.EnumMarshal(2, buf[offset:], int32(x.Mode))
+ offset += proto.BoolMarshal(3, buf[offset:], x.ResetErrorCounter)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SetShardModeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SetShardModeRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- case 2: // Mode
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Mode")
- }
- x.Mode = ShardMode(data)
- case 3: // ResetErrorCounter
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ResetErrorCounter")
- }
- x.ResetErrorCounter = data
- }
- }
- return nil
-}
-func (x *SetShardModeRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *SetShardModeRequest_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-func (x *SetShardModeRequest_Body) GetMode() ShardMode {
- if x != nil {
- return x.Mode
- }
- return 0
-}
-func (x *SetShardModeRequest_Body) SetMode(v ShardMode) {
- x.Mode = v
-}
-func (x *SetShardModeRequest_Body) GetResetErrorCounter() bool {
- if x != nil {
- return x.ResetErrorCounter
- }
- return false
-}
-func (x *SetShardModeRequest_Body) SetResetErrorCounter(v bool) {
- x.ResetErrorCounter = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SetShardModeRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SetShardModeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"mode\":"
- out.RawString(prefix)
- v := int32(x.Mode)
- if vv, ok := ShardMode_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"resetErrorCounter\":"
- out.RawString(prefix)
- out.Bool(x.ResetErrorCounter)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SetShardModeRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SetShardModeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- case "mode":
- {
- var f ShardMode
- var parsedValue ShardMode
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := ShardMode_value[v]; ok {
- parsedValue = ShardMode(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = ShardMode(vv)
- case float64:
- parsedValue = ShardMode(v)
- }
- f = parsedValue
- x.Mode = f
- }
- case "resetErrorCounter":
- {
- var f bool
- f = in.Bool()
- x.ResetErrorCounter = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SetShardModeRequest struct {
- Body *SetShardModeRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SetShardModeRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*SetShardModeRequest)(nil)
- _ json.Marshaler = (*SetShardModeRequest)(nil)
- _ json.Unmarshaler = (*SetShardModeRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3657,6 +711,27 @@ func (x *SetShardModeRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SetShardModeRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3672,174 +747,13 @@ func (x *SetShardModeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SetShardModeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SetShardModeRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *SetShardModeRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *SetShardModeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SetShardModeRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SetShardModeRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(SetShardModeRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *SetShardModeRequest) GetBody() *SetShardModeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *SetShardModeRequest) SetBody(v *SetShardModeRequest_Body) {
- x.Body = v
-}
-func (x *SetShardModeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *SetShardModeRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SetShardModeRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SetShardModeRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SetShardModeRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SetShardModeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *SetShardModeRequest_Body
- f = new(SetShardModeRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SetShardModeResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SetShardModeResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*SetShardModeResponse_Body)(nil)
- _ json.Marshaler = (*SetShardModeResponse_Body)(nil)
- _ json.Unmarshaler = (*SetShardModeResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3850,93 +764,18 @@ func (x *SetShardModeResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SetShardModeResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SetShardModeResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *SetShardModeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SetShardModeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SetShardModeResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SetShardModeResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SetShardModeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SetShardModeResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SetShardModeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SetShardModeResponse struct {
- Body *SetShardModeResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SetShardModeResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*SetShardModeResponse)(nil)
- _ json.Marshaler = (*SetShardModeResponse)(nil)
- _ json.Unmarshaler = (*SetShardModeResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3949,6 +788,27 @@ func (x *SetShardModeResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SetShardModeResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3964,177 +824,13 @@ func (x *SetShardModeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SetShardModeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SetShardModeResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *SetShardModeResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *SetShardModeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SetShardModeResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SetShardModeResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(SetShardModeResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *SetShardModeResponse) GetBody() *SetShardModeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) {
- x.Body = v
-}
-func (x *SetShardModeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *SetShardModeResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SetShardModeResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SetShardModeResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SetShardModeResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SetShardModeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *SetShardModeResponse_Body
- f = new(SetShardModeResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SynchronizeTreeRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- Height uint64 `json:"height"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SynchronizeTreeRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*SynchronizeTreeRequest_Body)(nil)
- _ json.Marshaler = (*SynchronizeTreeRequest_Body)(nil)
- _ json.Unmarshaler = (*SynchronizeTreeRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -4148,219 +844,28 @@ func (x *SynchronizeTreeRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SynchronizeTreeRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *SynchronizeTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SynchronizeTreeRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
- }
- if len(x.TreeId) != 0 {
- mm.AppendString(2, x.TreeId)
- }
- if x.Height != 0 {
- mm.AppendUint64(3, x.Height)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
+ offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
+ offset += proto.UInt64Marshal(3, buf[offset:], x.Height)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SynchronizeTreeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- case 2: // TreeId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TreeId")
- }
- x.TreeId = data
- case 3: // Height
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Height")
- }
- x.Height = data
- }
- }
- return nil
-}
-func (x *SynchronizeTreeRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *SynchronizeTreeRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-func (x *SynchronizeTreeRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-func (x *SynchronizeTreeRequest_Body) SetTreeId(v string) {
- x.TreeId = v
-}
-func (x *SynchronizeTreeRequest_Body) GetHeight() uint64 {
- if x != nil {
- return x.Height
- }
- return 0
-}
-func (x *SynchronizeTreeRequest_Body) SetHeight(v uint64) {
- x.Height = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SynchronizeTreeRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SynchronizeTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
- out.RawString(prefix)
- out.String(x.TreeId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"height\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10)
- out.RawByte('"')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SynchronizeTreeRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- case "treeId":
- {
- var f string
- f = in.String()
- x.TreeId = f
- }
- case "height":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.Height = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SynchronizeTreeRequest struct {
- Body *SynchronizeTreeRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SynchronizeTreeRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*SynchronizeTreeRequest)(nil)
- _ json.Marshaler = (*SynchronizeTreeRequest)(nil)
- _ json.Unmarshaler = (*SynchronizeTreeRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -4373,6 +878,27 @@ func (x *SynchronizeTreeRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SynchronizeTreeRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -4388,174 +914,13 @@ func (x *SynchronizeTreeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SynchronizeTreeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SynchronizeTreeRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *SynchronizeTreeRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *SynchronizeTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SynchronizeTreeRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(SynchronizeTreeRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *SynchronizeTreeRequest) GetBody() *SynchronizeTreeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) {
- x.Body = v
-}
-func (x *SynchronizeTreeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *SynchronizeTreeRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SynchronizeTreeRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SynchronizeTreeRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SynchronizeTreeRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SynchronizeTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *SynchronizeTreeRequest_Body
- f = new(SynchronizeTreeRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SynchronizeTreeResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SynchronizeTreeResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*SynchronizeTreeResponse_Body)(nil)
- _ json.Marshaler = (*SynchronizeTreeResponse_Body)(nil)
- _ json.Unmarshaler = (*SynchronizeTreeResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -4566,93 +931,18 @@ func (x *SynchronizeTreeResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SynchronizeTreeResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SynchronizeTreeResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *SynchronizeTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SynchronizeTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SynchronizeTreeResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SynchronizeTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SynchronizeTreeResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SynchronizeTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SynchronizeTreeResponse struct {
- Body *SynchronizeTreeResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SynchronizeTreeResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*SynchronizeTreeResponse)(nil)
- _ json.Marshaler = (*SynchronizeTreeResponse)(nil)
- _ json.Unmarshaler = (*SynchronizeTreeResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -4665,6 +955,27 @@ func (x *SynchronizeTreeResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *SynchronizeTreeResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -4680,176 +991,13 @@ func (x *SynchronizeTreeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SynchronizeTreeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SynchronizeTreeResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *SynchronizeTreeResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *SynchronizeTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SynchronizeTreeResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(SynchronizeTreeResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *SynchronizeTreeResponse) GetBody() *SynchronizeTreeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *SynchronizeTreeResponse) SetBody(v *SynchronizeTreeResponse_Body) {
- x.Body = v
-}
-func (x *SynchronizeTreeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *SynchronizeTreeResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SynchronizeTreeResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SynchronizeTreeResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SynchronizeTreeResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SynchronizeTreeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *SynchronizeTreeResponse_Body
- f = new(SynchronizeTreeResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type EvacuateShardRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
- IgnoreErrors bool `json:"ignoreErrors"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*EvacuateShardRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*EvacuateShardRequest_Body)(nil)
- _ json.Marshaler = (*EvacuateShardRequest_Body)(nil)
- _ json.Unmarshaler = (*EvacuateShardRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -4862,189 +1010,27 @@ func (x *EvacuateShardRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *EvacuateShardRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *EvacuateShardRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *EvacuateShardRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- for j := range x.Shard_ID {
- mm.AppendBytes(1, x.Shard_ID[j])
- }
- if x.IgnoreErrors {
- mm.AppendBool(2, x.IgnoreErrors)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
+ offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *EvacuateShardRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "EvacuateShardRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- case 2: // IgnoreErrors
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors")
- }
- x.IgnoreErrors = data
- }
- }
- return nil
-}
-func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *EvacuateShardRequest_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-func (x *EvacuateShardRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-func (x *EvacuateShardRequest_Body) SetIgnoreErrors(v bool) {
- x.IgnoreErrors = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *EvacuateShardRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *EvacuateShardRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"ignoreErrors\":"
- out.RawString(prefix)
- out.Bool(x.IgnoreErrors)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *EvacuateShardRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *EvacuateShardRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- case "ignoreErrors":
- {
- var f bool
- f = in.Bool()
- x.IgnoreErrors = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type EvacuateShardRequest struct {
- Body *EvacuateShardRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*EvacuateShardRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*EvacuateShardRequest)(nil)
- _ json.Marshaler = (*EvacuateShardRequest)(nil)
- _ json.Unmarshaler = (*EvacuateShardRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -5057,6 +1043,27 @@ func (x *EvacuateShardRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *EvacuateShardRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -5072,175 +1079,13 @@ func (x *EvacuateShardRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *EvacuateShardRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *EvacuateShardRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *EvacuateShardRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *EvacuateShardRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *EvacuateShardRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "EvacuateShardRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(EvacuateShardRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *EvacuateShardRequest) GetBody() *EvacuateShardRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *EvacuateShardRequest) SetBody(v *EvacuateShardRequest_Body) {
- x.Body = v
-}
-func (x *EvacuateShardRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *EvacuateShardRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *EvacuateShardRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *EvacuateShardRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *EvacuateShardRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *EvacuateShardRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *EvacuateShardRequest_Body
- f = new(EvacuateShardRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type EvacuateShardResponse_Body struct {
- Count uint32 `json:"count"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*EvacuateShardResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*EvacuateShardResponse_Body)(nil)
- _ json.Marshaler = (*EvacuateShardResponse_Body)(nil)
- _ json.Unmarshaler = (*EvacuateShardResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -5252,139 +1097,26 @@ func (x *EvacuateShardResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *EvacuateShardResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *EvacuateShardResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *EvacuateShardResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if x.Count != 0 {
- mm.AppendUint32(1, x.Count)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.UInt32Marshal(1, buf[offset:], x.Count)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *EvacuateShardResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "EvacuateShardResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Count
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Count")
- }
- x.Count = data
- }
- }
- return nil
-}
-func (x *EvacuateShardResponse_Body) GetCount() uint32 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-func (x *EvacuateShardResponse_Body) SetCount(v uint32) {
- x.Count = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *EvacuateShardResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *EvacuateShardResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"count\":"
- out.RawString(prefix)
- out.Uint32(x.Count)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *EvacuateShardResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *EvacuateShardResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "count":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.Count = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type EvacuateShardResponse struct {
- Body *EvacuateShardResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*EvacuateShardResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*EvacuateShardResponse)(nil)
- _ json.Marshaler = (*EvacuateShardResponse)(nil)
- _ json.Unmarshaler = (*EvacuateShardResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -5397,6 +1129,27 @@ func (x *EvacuateShardResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *EvacuateShardResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -5412,176 +1165,13 @@ func (x *EvacuateShardResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *EvacuateShardResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *EvacuateShardResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *EvacuateShardResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *EvacuateShardResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *EvacuateShardResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "EvacuateShardResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(EvacuateShardResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *EvacuateShardResponse) GetBody() *EvacuateShardResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *EvacuateShardResponse) SetBody(v *EvacuateShardResponse_Body) {
- x.Body = v
-}
-func (x *EvacuateShardResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *EvacuateShardResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *EvacuateShardResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *EvacuateShardResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *EvacuateShardResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *EvacuateShardResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *EvacuateShardResponse_Body
- f = new(EvacuateShardResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type FlushCacheRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
- Seal bool `json:"seal"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*FlushCacheRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*FlushCacheRequest_Body)(nil)
- _ json.Marshaler = (*FlushCacheRequest_Body)(nil)
- _ json.Unmarshaler = (*FlushCacheRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -5590,193 +1180,29 @@ func (x *FlushCacheRequest_Body) StableSize() (size int) {
return 0
}
size += proto.RepeatedBytesSize(1, x.Shard_ID)
- size += proto.BoolSize(2, x.Seal)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *FlushCacheRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *FlushCacheRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *FlushCacheRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- for j := range x.Shard_ID {
- mm.AppendBytes(1, x.Shard_ID[j])
- }
- if x.Seal {
- mm.AppendBool(2, x.Seal)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *FlushCacheRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "FlushCacheRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- case 2: // Seal
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Seal")
- }
- x.Seal = data
- }
- }
- return nil
-}
-func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *FlushCacheRequest_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-func (x *FlushCacheRequest_Body) GetSeal() bool {
- if x != nil {
- return x.Seal
- }
- return false
-}
-func (x *FlushCacheRequest_Body) SetSeal(v bool) {
- x.Seal = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *FlushCacheRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *FlushCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"seal\":"
- out.RawString(prefix)
- out.Bool(x.Seal)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *FlushCacheRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *FlushCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- case "seal":
- {
- var f bool
- f = in.Bool()
- x.Seal = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type FlushCacheRequest struct {
- Body *FlushCacheRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*FlushCacheRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*FlushCacheRequest)(nil)
- _ json.Marshaler = (*FlushCacheRequest)(nil)
- _ json.Unmarshaler = (*FlushCacheRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -5789,6 +1215,27 @@ func (x *FlushCacheRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *FlushCacheRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -5804,174 +1251,13 @@ func (x *FlushCacheRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *FlushCacheRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *FlushCacheRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *FlushCacheRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *FlushCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *FlushCacheRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "FlushCacheRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(FlushCacheRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *FlushCacheRequest) GetBody() *FlushCacheRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *FlushCacheRequest) SetBody(v *FlushCacheRequest_Body) {
- x.Body = v
-}
-func (x *FlushCacheRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *FlushCacheRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *FlushCacheRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *FlushCacheRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *FlushCacheRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *FlushCacheRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *FlushCacheRequest_Body
- f = new(FlushCacheRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type FlushCacheResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*FlushCacheResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*FlushCacheResponse_Body)(nil)
- _ json.Marshaler = (*FlushCacheResponse_Body)(nil)
- _ json.Unmarshaler = (*FlushCacheResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -5982,93 +1268,18 @@ func (x *FlushCacheResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *FlushCacheResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *FlushCacheResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *FlushCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *FlushCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "FlushCacheResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *FlushCacheResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *FlushCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *FlushCacheResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *FlushCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type FlushCacheResponse struct {
- Body *FlushCacheResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*FlushCacheResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*FlushCacheResponse)(nil)
- _ json.Marshaler = (*FlushCacheResponse)(nil)
- _ json.Unmarshaler = (*FlushCacheResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6081,6 +1292,27 @@ func (x *FlushCacheResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *FlushCacheResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -6096,176 +1328,13 @@ func (x *FlushCacheResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *FlushCacheResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *FlushCacheResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *FlushCacheResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *FlushCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *FlushCacheResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "FlushCacheResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(FlushCacheResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *FlushCacheResponse) GetBody() *FlushCacheResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *FlushCacheResponse) SetBody(v *FlushCacheResponse_Body) {
- x.Body = v
-}
-func (x *FlushCacheResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *FlushCacheResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *FlushCacheResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *FlushCacheResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *FlushCacheResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *FlushCacheResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *FlushCacheResponse_Body
- f = new(FlushCacheResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DoctorRequest_Body struct {
- Concurrency uint32 `json:"concurrency"`
- RemoveDuplicates bool `json:"removeDuplicates"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DoctorRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*DoctorRequest_Body)(nil)
- _ json.Marshaler = (*DoctorRequest_Body)(nil)
- _ json.Unmarshaler = (*DoctorRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6278,173 +1347,27 @@ func (x *DoctorRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DoctorRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *DoctorRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DoctorRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if x.Concurrency != 0 {
- mm.AppendUint32(1, x.Concurrency)
- }
- if x.RemoveDuplicates {
- mm.AppendBool(2, x.RemoveDuplicates)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.UInt32Marshal(1, buf[offset:], x.Concurrency)
+ offset += proto.BoolMarshal(2, buf[offset:], x.RemoveDuplicates)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DoctorRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DoctorRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Concurrency
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Concurrency")
- }
- x.Concurrency = data
- case 2: // RemoveDuplicates
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "RemoveDuplicates")
- }
- x.RemoveDuplicates = data
- }
- }
- return nil
-}
-func (x *DoctorRequest_Body) GetConcurrency() uint32 {
- if x != nil {
- return x.Concurrency
- }
- return 0
-}
-func (x *DoctorRequest_Body) SetConcurrency(v uint32) {
- x.Concurrency = v
-}
-func (x *DoctorRequest_Body) GetRemoveDuplicates() bool {
- if x != nil {
- return x.RemoveDuplicates
- }
- return false
-}
-func (x *DoctorRequest_Body) SetRemoveDuplicates(v bool) {
- x.RemoveDuplicates = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DoctorRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DoctorRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"concurrency\":"
- out.RawString(prefix)
- out.Uint32(x.Concurrency)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"removeDuplicates\":"
- out.RawString(prefix)
- out.Bool(x.RemoveDuplicates)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DoctorRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DoctorRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "concurrency":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.Concurrency = f
- }
- case "removeDuplicates":
- {
- var f bool
- f = in.Bool()
- x.RemoveDuplicates = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DoctorRequest struct {
- Body *DoctorRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DoctorRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*DoctorRequest)(nil)
- _ json.Marshaler = (*DoctorRequest)(nil)
- _ json.Unmarshaler = (*DoctorRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6457,6 +1380,27 @@ func (x *DoctorRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DoctorRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -6472,174 +1416,13 @@ func (x *DoctorRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DoctorRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DoctorRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *DoctorRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *DoctorRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DoctorRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DoctorRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(DoctorRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *DoctorRequest) GetBody() *DoctorRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *DoctorRequest) SetBody(v *DoctorRequest_Body) {
- x.Body = v
-}
-func (x *DoctorRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *DoctorRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DoctorRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DoctorRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DoctorRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DoctorRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *DoctorRequest_Body
- f = new(DoctorRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DoctorResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DoctorResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*DoctorResponse_Body)(nil)
- _ json.Marshaler = (*DoctorResponse_Body)(nil)
- _ json.Unmarshaler = (*DoctorResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6650,93 +1433,18 @@ func (x *DoctorResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DoctorResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DoctorResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *DoctorResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DoctorResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DoctorResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DoctorResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DoctorResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DoctorResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DoctorResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DoctorResponse struct {
- Body *DoctorResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DoctorResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*DoctorResponse)(nil)
- _ json.Marshaler = (*DoctorResponse)(nil)
- _ json.Unmarshaler = (*DoctorResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6749,6 +1457,27 @@ func (x *DoctorResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DoctorResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -6764,215 +1493,13 @@ func (x *DoctorResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DoctorResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DoctorResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *DoctorResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *DoctorResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DoctorResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DoctorResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(DoctorResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *DoctorResponse) GetBody() *DoctorResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *DoctorResponse) SetBody(v *DoctorResponse_Body) {
- x.Body = v
-}
-func (x *DoctorResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *DoctorResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DoctorResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DoctorResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DoctorResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DoctorResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *DoctorResponse_Body
- f = new(DoctorResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StartShardEvacuationRequest_Body_Scope int32
-
-const (
- StartShardEvacuationRequest_Body_NONE StartShardEvacuationRequest_Body_Scope = 0
- StartShardEvacuationRequest_Body_OBJECTS StartShardEvacuationRequest_Body_Scope = 1
- StartShardEvacuationRequest_Body_TREES StartShardEvacuationRequest_Body_Scope = 2
-)
-
-var (
- StartShardEvacuationRequest_Body_Scope_name = map[int32]string{
- 0: "NONE",
- 1: "OBJECTS",
- 2: "TREES",
- }
- StartShardEvacuationRequest_Body_Scope_value = map[string]int32{
- "NONE": 0,
- "OBJECTS": 1,
- "TREES": 2,
- }
-)
-
-func (x StartShardEvacuationRequest_Body_Scope) String() string {
- if v, ok := StartShardEvacuationRequest_Body_Scope_name[int32(x)]; ok {
- return v
- }
- return strconv.FormatInt(int64(x), 10)
-}
-func (x *StartShardEvacuationRequest_Body_Scope) FromString(s string) bool {
- if v, ok := StartShardEvacuationRequest_Body_Scope_value[s]; ok {
- *x = StartShardEvacuationRequest_Body_Scope(v)
- return true
- }
- return false
-}
-
-type StartShardEvacuationRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
- IgnoreErrors bool `json:"ignoreErrors"`
- Scope uint32 `json:"scope"`
- ContainerWorkerCount uint32 `json:"containerWorkerCount"`
- ObjectWorkerCount uint32 `json:"objectWorkerCount"`
- RepOneOnly bool `json:"repOneOnly"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StartShardEvacuationRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*StartShardEvacuationRequest_Body)(nil)
- _ json.Marshaler = (*StartShardEvacuationRequest_Body)(nil)
- _ json.Unmarshaler = (*StartShardEvacuationRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6982,356 +1509,30 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) {
}
size += proto.RepeatedBytesSize(1, x.Shard_ID)
size += proto.BoolSize(2, x.IgnoreErrors)
- size += proto.UInt32Size(3, x.Scope)
- size += proto.UInt32Size(4, x.ContainerWorkerCount)
- size += proto.UInt32Size(5, x.ObjectWorkerCount)
- size += proto.BoolSize(6, x.RepOneOnly)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardEvacuationRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *StartShardEvacuationRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- for j := range x.Shard_ID {
- mm.AppendBytes(1, x.Shard_ID[j])
- }
- if x.IgnoreErrors {
- mm.AppendBool(2, x.IgnoreErrors)
- }
- if x.Scope != 0 {
- mm.AppendUint32(3, x.Scope)
- }
- if x.ContainerWorkerCount != 0 {
- mm.AppendUint32(4, x.ContainerWorkerCount)
- }
- if x.ObjectWorkerCount != 0 {
- mm.AppendUint32(5, x.ObjectWorkerCount)
- }
- if x.RepOneOnly {
- mm.AppendBool(6, x.RepOneOnly)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
+ offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- case 2: // IgnoreErrors
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors")
- }
- x.IgnoreErrors = data
- case 3: // Scope
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Scope")
- }
- x.Scope = data
- case 4: // ContainerWorkerCount
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerWorkerCount")
- }
- x.ContainerWorkerCount = data
- case 5: // ObjectWorkerCount
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount")
- }
- x.ObjectWorkerCount = data
- case 6: // RepOneOnly
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "RepOneOnly")
- }
- x.RepOneOnly = data
- }
- }
- return nil
-}
-func (x *StartShardEvacuationRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *StartShardEvacuationRequest_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-func (x *StartShardEvacuationRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-func (x *StartShardEvacuationRequest_Body) SetIgnoreErrors(v bool) {
- x.IgnoreErrors = v
-}
-func (x *StartShardEvacuationRequest_Body) GetScope() uint32 {
- if x != nil {
- return x.Scope
- }
- return 0
-}
-func (x *StartShardEvacuationRequest_Body) SetScope(v uint32) {
- x.Scope = v
-}
-func (x *StartShardEvacuationRequest_Body) GetContainerWorkerCount() uint32 {
- if x != nil {
- return x.ContainerWorkerCount
- }
- return 0
-}
-func (x *StartShardEvacuationRequest_Body) SetContainerWorkerCount(v uint32) {
- x.ContainerWorkerCount = v
-}
-func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 {
- if x != nil {
- return x.ObjectWorkerCount
- }
- return 0
-}
-func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) {
- x.ObjectWorkerCount = v
-}
-func (x *StartShardEvacuationRequest_Body) GetRepOneOnly() bool {
- if x != nil {
- return x.RepOneOnly
- }
- return false
-}
-func (x *StartShardEvacuationRequest_Body) SetRepOneOnly(v bool) {
- x.RepOneOnly = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"ignoreErrors\":"
- out.RawString(prefix)
- out.Bool(x.IgnoreErrors)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"scope\":"
- out.RawString(prefix)
- out.Uint32(x.Scope)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerWorkerCount\":"
- out.RawString(prefix)
- out.Uint32(x.ContainerWorkerCount)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"objectWorkerCount\":"
- out.RawString(prefix)
- out.Uint32(x.ObjectWorkerCount)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"repOneOnly\":"
- out.RawString(prefix)
- out.Bool(x.RepOneOnly)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StartShardEvacuationRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- case "ignoreErrors":
- {
- var f bool
- f = in.Bool()
- x.IgnoreErrors = f
- }
- case "scope":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.Scope = f
- }
- case "containerWorkerCount":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.ContainerWorkerCount = f
- }
- case "objectWorkerCount":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.ObjectWorkerCount = f
- }
- case "repOneOnly":
- {
- var f bool
- f = in.Bool()
- x.RepOneOnly = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StartShardEvacuationRequest struct {
- Body *StartShardEvacuationRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StartShardEvacuationRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*StartShardEvacuationRequest)(nil)
- _ json.Marshaler = (*StartShardEvacuationRequest)(nil)
- _ json.Unmarshaler = (*StartShardEvacuationRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -7344,6 +1545,27 @@ func (x *StartShardEvacuationRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *StartShardEvacuationRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -7359,174 +1581,13 @@ func (x *StartShardEvacuationRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *StartShardEvacuationRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardEvacuationRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *StartShardEvacuationRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *StartShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StartShardEvacuationRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(StartShardEvacuationRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *StartShardEvacuationRequest) GetBody() *StartShardEvacuationRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *StartShardEvacuationRequest) SetBody(v *StartShardEvacuationRequest_Body) {
- x.Body = v
-}
-func (x *StartShardEvacuationRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *StartShardEvacuationRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StartShardEvacuationRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StartShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StartShardEvacuationRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StartShardEvacuationRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *StartShardEvacuationRequest_Body
- f = new(StartShardEvacuationRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StartShardEvacuationResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StartShardEvacuationResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*StartShardEvacuationResponse_Body)(nil)
- _ json.Marshaler = (*StartShardEvacuationResponse_Body)(nil)
- _ json.Unmarshaler = (*StartShardEvacuationResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -7537,93 +1598,18 @@ func (x *StartShardEvacuationResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardEvacuationResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *StartShardEvacuationResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *StartShardEvacuationResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StartShardEvacuationResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StartShardEvacuationResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StartShardEvacuationResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StartShardEvacuationResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StartShardEvacuationResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StartShardEvacuationResponse struct {
- Body *StartShardEvacuationResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StartShardEvacuationResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*StartShardEvacuationResponse)(nil)
- _ json.Marshaler = (*StartShardEvacuationResponse)(nil)
- _ json.Unmarshaler = (*StartShardEvacuationResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -7636,6 +1622,27 @@ func (x *StartShardEvacuationResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *StartShardEvacuationResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -7651,174 +1658,13 @@ func (x *StartShardEvacuationResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *StartShardEvacuationResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardEvacuationResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *StartShardEvacuationResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *StartShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StartShardEvacuationResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(StartShardEvacuationResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *StartShardEvacuationResponse) GetBody() *StartShardEvacuationResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *StartShardEvacuationResponse) SetBody(v *StartShardEvacuationResponse_Body) {
- x.Body = v
-}
-func (x *StartShardEvacuationResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *StartShardEvacuationResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StartShardEvacuationResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StartShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StartShardEvacuationResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StartShardEvacuationResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *StartShardEvacuationResponse_Body
- f = new(StartShardEvacuationResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetShardEvacuationStatusRequest_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
- _ json.Marshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
- _ json.Unmarshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -7829,93 +1675,18 @@ func (x *GetShardEvacuationStatusRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetShardEvacuationStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetShardEvacuationStatusRequest_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *GetShardEvacuationStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetShardEvacuationStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusRequest_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetShardEvacuationStatusRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetShardEvacuationStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetShardEvacuationStatusRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetShardEvacuationStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetShardEvacuationStatusRequest struct {
- Body *GetShardEvacuationStatusRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusRequest)(nil)
- _ json.Marshaler = (*GetShardEvacuationStatusRequest)(nil)
- _ json.Unmarshaler = (*GetShardEvacuationStatusRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -7928,6 +1699,27 @@ func (x *GetShardEvacuationStatusRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetShardEvacuationStatusRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -7943,210 +1735,13 @@ func (x *GetShardEvacuationStatusRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetShardEvacuationStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetShardEvacuationStatusRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *GetShardEvacuationStatusRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *GetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetShardEvacuationStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetShardEvacuationStatusRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetShardEvacuationStatusRequest) GetBody() *GetShardEvacuationStatusRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetShardEvacuationStatusRequest) SetBody(v *GetShardEvacuationStatusRequest_Body) {
- x.Body = v
-}
-func (x *GetShardEvacuationStatusRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetShardEvacuationStatusRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetShardEvacuationStatusRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetShardEvacuationStatusRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetShardEvacuationStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetShardEvacuationStatusRequest_Body
- f = new(GetShardEvacuationStatusRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetShardEvacuationStatusResponse_Body_Status int32
-
-const (
- GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED GetShardEvacuationStatusResponse_Body_Status = 0
- GetShardEvacuationStatusResponse_Body_RUNNING GetShardEvacuationStatusResponse_Body_Status = 1
- GetShardEvacuationStatusResponse_Body_COMPLETED GetShardEvacuationStatusResponse_Body_Status = 2
-)
-
-var (
- GetShardEvacuationStatusResponse_Body_Status_name = map[int32]string{
- 0: "EVACUATE_SHARD_STATUS_UNDEFINED",
- 1: "RUNNING",
- 2: "COMPLETED",
- }
- GetShardEvacuationStatusResponse_Body_Status_value = map[string]int32{
- "EVACUATE_SHARD_STATUS_UNDEFINED": 0,
- "RUNNING": 1,
- "COMPLETED": 2,
- }
-)
-
-func (x GetShardEvacuationStatusResponse_Body_Status) String() string {
- if v, ok := GetShardEvacuationStatusResponse_Body_Status_name[int32(x)]; ok {
- return v
- }
- return strconv.FormatInt(int64(x), 10)
-}
-func (x *GetShardEvacuationStatusResponse_Body_Status) FromString(s string) bool {
- if v, ok := GetShardEvacuationStatusResponse_Body_Status_value[s]; ok {
- *x = GetShardEvacuationStatusResponse_Body_Status(v)
- return true
- }
- return false
-}
-
-type GetShardEvacuationStatusResponse_Body_UnixTimestamp struct {
- Value int64 `json:"value"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
- _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
- _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
- _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -8158,140 +1753,26 @@ func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) StableSize() (size
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if x.Value != 0 {
- mm.AppendInt64(1, x.Value)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.Int64Marshal(1, buf[offset:], x.Value)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body_UnixTimestamp")
- }
- switch fc.FieldNum {
- case 1: // Value
- data, ok := fc.Int64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Value")
- }
- x.Value = data
- }
- }
- return nil
-}
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) GetValue() int64 {
- if x != nil {
- return x.Value
- }
- return 0
-}
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) SetValue(v int64) {
- x.Value = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"value\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Value, 10)
- out.RawByte('"')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "value":
- {
- var f int64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseInt(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := int64(v)
- f = pv
- x.Value = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetShardEvacuationStatusResponse_Body_Duration struct {
- Seconds int64 `json:"seconds"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
- _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
- _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
- _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -8303,151 +1784,26 @@ func (x *GetShardEvacuationStatusResponse_Body_Duration) StableSize() (size int)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetShardEvacuationStatusResponse_Body_Duration) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetShardEvacuationStatusResponse_Body_Duration) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if x.Seconds != 0 {
- mm.AppendInt64(1, x.Seconds)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.Int64Marshal(1, buf[offset:], x.Seconds)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body_Duration")
- }
- switch fc.FieldNum {
- case 1: // Seconds
- data, ok := fc.Int64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Seconds")
- }
- x.Seconds = data
- }
- }
- return nil
-}
-func (x *GetShardEvacuationStatusResponse_Body_Duration) GetSeconds() int64 {
- if x != nil {
- return x.Seconds
- }
- return 0
-}
-func (x *GetShardEvacuationStatusResponse_Body_Duration) SetSeconds(v int64) {
- x.Seconds = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"seconds\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Seconds, 10)
- out.RawByte('"')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "seconds":
- {
- var f int64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseInt(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := int64(v)
- f = pv
- x.Seconds = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetShardEvacuationStatusResponse_Body struct {
- TotalObjects uint64 `json:"totalObjects"`
- EvacuatedObjects uint64 `json:"evacuatedObjects"`
- FailedObjects uint64 `json:"failedObjects"`
- Shard_ID [][]byte `json:"shardID"`
- Status GetShardEvacuationStatusResponse_Body_Status `json:"status"`
- Duration *GetShardEvacuationStatusResponse_Body_Duration `json:"duration"`
- StartedAt *GetShardEvacuationStatusResponse_Body_UnixTimestamp `json:"startedAt"`
- ErrorMessage string `json:"errorMessage"`
- SkippedObjects uint64 `json:"skippedObjects"`
- TotalTrees uint64 `json:"totalTrees"`
- EvacuatedTrees uint64 `json:"evacuatedTrees"`
- FailedTrees uint64 `json:"failedTrees"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
- _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
- _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -8455,643 +1811,44 @@ func (x *GetShardEvacuationStatusResponse_Body) StableSize() (size int) {
if x == nil {
return 0
}
- size += proto.UInt64Size(1, x.TotalObjects)
- size += proto.UInt64Size(2, x.EvacuatedObjects)
- size += proto.UInt64Size(3, x.FailedObjects)
+ size += proto.UInt64Size(1, x.Total)
+ size += proto.UInt64Size(2, x.Evacuated)
+ size += proto.UInt64Size(3, x.Failed)
size += proto.RepeatedBytesSize(4, x.Shard_ID)
size += proto.EnumSize(5, int32(x.Status))
size += proto.NestedStructureSize(6, x.Duration)
size += proto.NestedStructureSize(7, x.StartedAt)
size += proto.StringSize(8, x.ErrorMessage)
- size += proto.UInt64Size(9, x.SkippedObjects)
- size += proto.UInt64Size(10, x.TotalTrees)
- size += proto.UInt64Size(11, x.EvacuatedTrees)
- size += proto.UInt64Size(12, x.FailedTrees)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetShardEvacuationStatusResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if x.TotalObjects != 0 {
- mm.AppendUint64(1, x.TotalObjects)
- }
- if x.EvacuatedObjects != 0 {
- mm.AppendUint64(2, x.EvacuatedObjects)
- }
- if x.FailedObjects != 0 {
- mm.AppendUint64(3, x.FailedObjects)
- }
- for j := range x.Shard_ID {
- mm.AppendBytes(4, x.Shard_ID[j])
- }
- if int32(x.Status) != 0 {
- mm.AppendInt32(5, int32(x.Status))
- }
- if x.Duration != nil {
- x.Duration.EmitProtobuf(mm.AppendMessage(6))
- }
- if x.StartedAt != nil {
- x.StartedAt.EmitProtobuf(mm.AppendMessage(7))
- }
- if len(x.ErrorMessage) != 0 {
- mm.AppendString(8, x.ErrorMessage)
- }
- if x.SkippedObjects != 0 {
- mm.AppendUint64(9, x.SkippedObjects)
- }
- if x.TotalTrees != 0 {
- mm.AppendUint64(10, x.TotalTrees)
- }
- if x.EvacuatedTrees != 0 {
- mm.AppendUint64(11, x.EvacuatedTrees)
- }
- if x.FailedTrees != 0 {
- mm.AppendUint64(12, x.FailedTrees)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.UInt64Marshal(1, buf[offset:], x.Total)
+ offset += proto.UInt64Marshal(2, buf[offset:], x.Evacuated)
+ offset += proto.UInt64Marshal(3, buf[offset:], x.Failed)
+ offset += proto.RepeatedBytesMarshal(4, buf[offset:], x.Shard_ID)
+ offset += proto.EnumMarshal(5, buf[offset:], int32(x.Status))
+ offset += proto.NestedStructureMarshal(6, buf[offset:], x.Duration)
+ offset += proto.NestedStructureMarshal(7, buf[offset:], x.StartedAt)
+ offset += proto.StringMarshal(8, buf[offset:], x.ErrorMessage)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // TotalObjects
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TotalObjects")
- }
- x.TotalObjects = data
- case 2: // EvacuatedObjects
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "EvacuatedObjects")
- }
- x.EvacuatedObjects = data
- case 3: // FailedObjects
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "FailedObjects")
- }
- x.FailedObjects = data
- case 4: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- case 5: // Status
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Status")
- }
- x.Status = GetShardEvacuationStatusResponse_Body_Status(data)
- case 6: // Duration
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Duration")
- }
- x.Duration = new(GetShardEvacuationStatusResponse_Body_Duration)
- if err := x.Duration.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 7: // StartedAt
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "StartedAt")
- }
- x.StartedAt = new(GetShardEvacuationStatusResponse_Body_UnixTimestamp)
- if err := x.StartedAt.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 8: // ErrorMessage
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ErrorMessage")
- }
- x.ErrorMessage = data
- case 9: // SkippedObjects
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "SkippedObjects")
- }
- x.SkippedObjects = data
- case 10: // TotalTrees
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TotalTrees")
- }
- x.TotalTrees = data
- case 11: // EvacuatedTrees
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "EvacuatedTrees")
- }
- x.EvacuatedTrees = data
- case 12: // FailedTrees
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "FailedTrees")
- }
- x.FailedTrees = data
- }
- }
- return nil
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetTotalObjects() uint64 {
- if x != nil {
- return x.TotalObjects
- }
- return 0
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetTotalObjects(v uint64) {
- x.TotalObjects = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedObjects() uint64 {
- if x != nil {
- return x.EvacuatedObjects
- }
- return 0
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetEvacuatedObjects(v uint64) {
- x.EvacuatedObjects = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetFailedObjects() uint64 {
- if x != nil {
- return x.FailedObjects
- }
- return 0
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetFailedObjects(v uint64) {
- x.FailedObjects = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetStatus() GetShardEvacuationStatusResponse_Body_Status {
- if x != nil {
- return x.Status
- }
- return 0
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetStatus(v GetShardEvacuationStatusResponse_Body_Status) {
- x.Status = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetDuration() *GetShardEvacuationStatusResponse_Body_Duration {
- if x != nil {
- return x.Duration
- }
- return nil
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetDuration(v *GetShardEvacuationStatusResponse_Body_Duration) {
- x.Duration = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetStartedAt() *GetShardEvacuationStatusResponse_Body_UnixTimestamp {
- if x != nil {
- return x.StartedAt
- }
- return nil
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetStartedAt(v *GetShardEvacuationStatusResponse_Body_UnixTimestamp) {
- x.StartedAt = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetErrorMessage() string {
- if x != nil {
- return x.ErrorMessage
- }
- return ""
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetErrorMessage(v string) {
- x.ErrorMessage = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetSkippedObjects() uint64 {
- if x != nil {
- return x.SkippedObjects
- }
- return 0
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetSkippedObjects(v uint64) {
- x.SkippedObjects = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetTotalTrees() uint64 {
- if x != nil {
- return x.TotalTrees
- }
- return 0
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetTotalTrees(v uint64) {
- x.TotalTrees = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedTrees() uint64 {
- if x != nil {
- return x.EvacuatedTrees
- }
- return 0
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetEvacuatedTrees(v uint64) {
- x.EvacuatedTrees = v
-}
-func (x *GetShardEvacuationStatusResponse_Body) GetFailedTrees() uint64 {
- if x != nil {
- return x.FailedTrees
- }
- return 0
-}
-func (x *GetShardEvacuationStatusResponse_Body) SetFailedTrees(v uint64) {
- x.FailedTrees = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"totalObjects\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalObjects, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"evacuatedObjects\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedObjects, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"failedObjects\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedObjects, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"status\":"
- out.RawString(prefix)
- v := int32(x.Status)
- if vv, ok := GetShardEvacuationStatusResponse_Body_Status_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"duration\":"
- out.RawString(prefix)
- x.Duration.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"startedAt\":"
- out.RawString(prefix)
- x.StartedAt.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"errorMessage\":"
- out.RawString(prefix)
- out.String(x.ErrorMessage)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"skippedObjects\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.SkippedObjects, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"totalTrees\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalTrees, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"evacuatedTrees\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedTrees, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"failedTrees\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedTrees, 10)
- out.RawByte('"')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetShardEvacuationStatusResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "totalObjects":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.TotalObjects = f
- }
- case "evacuatedObjects":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.EvacuatedObjects = f
- }
- case "failedObjects":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.FailedObjects = f
- }
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- case "status":
- {
- var f GetShardEvacuationStatusResponse_Body_Status
- var parsedValue GetShardEvacuationStatusResponse_Body_Status
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := GetShardEvacuationStatusResponse_Body_Status_value[v]; ok {
- parsedValue = GetShardEvacuationStatusResponse_Body_Status(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = GetShardEvacuationStatusResponse_Body_Status(vv)
- case float64:
- parsedValue = GetShardEvacuationStatusResponse_Body_Status(v)
- }
- f = parsedValue
- x.Status = f
- }
- case "duration":
- {
- var f *GetShardEvacuationStatusResponse_Body_Duration
- f = new(GetShardEvacuationStatusResponse_Body_Duration)
- f.UnmarshalEasyJSON(in)
- x.Duration = f
- }
- case "startedAt":
- {
- var f *GetShardEvacuationStatusResponse_Body_UnixTimestamp
- f = new(GetShardEvacuationStatusResponse_Body_UnixTimestamp)
- f.UnmarshalEasyJSON(in)
- x.StartedAt = f
- }
- case "errorMessage":
- {
- var f string
- f = in.String()
- x.ErrorMessage = f
- }
- case "skippedObjects":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.SkippedObjects = f
- }
- case "totalTrees":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.TotalTrees = f
- }
- case "evacuatedTrees":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.EvacuatedTrees = f
- }
- case "failedTrees":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.FailedTrees = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetShardEvacuationStatusResponse struct {
- Body *GetShardEvacuationStatusResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse)(nil)
- _ json.Marshaler = (*GetShardEvacuationStatusResponse)(nil)
- _ json.Unmarshaler = (*GetShardEvacuationStatusResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -9104,6 +1861,27 @@ func (x *GetShardEvacuationStatusResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetShardEvacuationStatusResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -9119,758 +1897,13 @@ func (x *GetShardEvacuationStatusResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetShardEvacuationStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetShardEvacuationStatusResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *GetShardEvacuationStatusResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *GetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetShardEvacuationStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetShardEvacuationStatusResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetShardEvacuationStatusResponse) GetBody() *GetShardEvacuationStatusResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetShardEvacuationStatusResponse) SetBody(v *GetShardEvacuationStatusResponse_Body) {
- x.Body = v
-}
-func (x *GetShardEvacuationStatusResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetShardEvacuationStatusResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetShardEvacuationStatusResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetShardEvacuationStatusResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetShardEvacuationStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetShardEvacuationStatusResponse_Body
- f = new(GetShardEvacuationStatusResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ResetShardEvacuationStatusRequest_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
- _ json.Marshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
- _ json.Unmarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ResetShardEvacuationStatusRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ResetShardEvacuationStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ResetShardEvacuationStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusRequest_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ResetShardEvacuationStatusRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ResetShardEvacuationStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ResetShardEvacuationStatusRequest struct {
- Body *ResetShardEvacuationStatusRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusRequest)(nil)
- _ json.Marshaler = (*ResetShardEvacuationStatusRequest)(nil)
- _ json.Unmarshaler = (*ResetShardEvacuationStatusRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ResetShardEvacuationStatusRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ResetShardEvacuationStatusRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ResetShardEvacuationStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ResetShardEvacuationStatusRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ResetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ResetShardEvacuationStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ResetShardEvacuationStatusRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ResetShardEvacuationStatusRequest) GetBody() *ResetShardEvacuationStatusRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ResetShardEvacuationStatusRequest) SetBody(v *ResetShardEvacuationStatusRequest_Body) {
- x.Body = v
-}
-func (x *ResetShardEvacuationStatusRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ResetShardEvacuationStatusRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ResetShardEvacuationStatusRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ResetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ResetShardEvacuationStatusRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ResetShardEvacuationStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ResetShardEvacuationStatusRequest_Body
- f = new(ResetShardEvacuationStatusRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ResetShardEvacuationStatusResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
- _ json.Marshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
- _ json.Unmarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ResetShardEvacuationStatusResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ResetShardEvacuationStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ResetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ResetShardEvacuationStatusResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ResetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ResetShardEvacuationStatusResponse struct {
- Body *ResetShardEvacuationStatusResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusResponse)(nil)
- _ json.Marshaler = (*ResetShardEvacuationStatusResponse)(nil)
- _ json.Unmarshaler = (*ResetShardEvacuationStatusResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ResetShardEvacuationStatusResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ResetShardEvacuationStatusResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ResetShardEvacuationStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ResetShardEvacuationStatusResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ResetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ResetShardEvacuationStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ResetShardEvacuationStatusResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ResetShardEvacuationStatusResponse) GetBody() *ResetShardEvacuationStatusResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ResetShardEvacuationStatusResponse) SetBody(v *ResetShardEvacuationStatusResponse_Body) {
- x.Body = v
-}
-func (x *ResetShardEvacuationStatusResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ResetShardEvacuationStatusResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ResetShardEvacuationStatusResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ResetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ResetShardEvacuationStatusResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ResetShardEvacuationStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ResetShardEvacuationStatusResponse_Body
- f = new(ResetShardEvacuationStatusResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StopShardEvacuationRequest_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StopShardEvacuationRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*StopShardEvacuationRequest_Body)(nil)
- _ json.Marshaler = (*StopShardEvacuationRequest_Body)(nil)
- _ json.Unmarshaler = (*StopShardEvacuationRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -9881,93 +1914,18 @@ func (x *StopShardEvacuationRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StopShardEvacuationRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *StopShardEvacuationRequest_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *StopShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StopShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationRequest_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StopShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StopShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StopShardEvacuationRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StopShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StopShardEvacuationRequest struct {
- Body *StopShardEvacuationRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StopShardEvacuationRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*StopShardEvacuationRequest)(nil)
- _ json.Marshaler = (*StopShardEvacuationRequest)(nil)
- _ json.Unmarshaler = (*StopShardEvacuationRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -9980,6 +1938,27 @@ func (x *StopShardEvacuationRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *StopShardEvacuationRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -9995,174 +1974,13 @@ func (x *StopShardEvacuationRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *StopShardEvacuationRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StopShardEvacuationRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *StopShardEvacuationRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *StopShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StopShardEvacuationRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(StopShardEvacuationRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *StopShardEvacuationRequest) GetBody() *StopShardEvacuationRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *StopShardEvacuationRequest) SetBody(v *StopShardEvacuationRequest_Body) {
- x.Body = v
-}
-func (x *StopShardEvacuationRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *StopShardEvacuationRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StopShardEvacuationRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StopShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StopShardEvacuationRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StopShardEvacuationRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *StopShardEvacuationRequest_Body
- f = new(StopShardEvacuationRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StopShardEvacuationResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StopShardEvacuationResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*StopShardEvacuationResponse_Body)(nil)
- _ json.Marshaler = (*StopShardEvacuationResponse_Body)(nil)
- _ json.Unmarshaler = (*StopShardEvacuationResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -10173,93 +1991,18 @@ func (x *StopShardEvacuationResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StopShardEvacuationResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *StopShardEvacuationResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *StopShardEvacuationResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StopShardEvacuationResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StopShardEvacuationResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StopShardEvacuationResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StopShardEvacuationResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StopShardEvacuationResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StopShardEvacuationResponse struct {
- Body *StopShardEvacuationResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StopShardEvacuationResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*StopShardEvacuationResponse)(nil)
- _ json.Marshaler = (*StopShardEvacuationResponse)(nil)
- _ json.Unmarshaler = (*StopShardEvacuationResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -10272,6 +2015,27 @@ func (x *StopShardEvacuationResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *StopShardEvacuationResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -10287,7743 +2051,9 @@ func (x *StopShardEvacuationResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *StopShardEvacuationResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StopShardEvacuationResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *StopShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StopShardEvacuationResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(StopShardEvacuationResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *StopShardEvacuationResponse) GetBody() *StopShardEvacuationResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *StopShardEvacuationResponse) SetBody(v *StopShardEvacuationResponse_Body) {
- x.Body = v
-}
-func (x *StopShardEvacuationResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *StopShardEvacuationResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StopShardEvacuationResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StopShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StopShardEvacuationResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StopShardEvacuationResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *StopShardEvacuationResponse_Body
- f = new(StopShardEvacuationResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddChainLocalOverrideRequest_Body struct {
- Target *ChainTarget `json:"target"`
- Chain []byte `json:"chain"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddChainLocalOverrideRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideRequest_Body)(nil)
- _ json.Marshaler = (*AddChainLocalOverrideRequest_Body)(nil)
- _ json.Unmarshaler = (*AddChainLocalOverrideRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *AddChainLocalOverrideRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Target)
- size += proto.BytesSize(2, x.Chain)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *AddChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Target != nil {
- x.Target.EmitProtobuf(mm.AppendMessage(1))
- }
- if len(x.Chain) != 0 {
- mm.AppendBytes(2, x.Chain)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Target
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Target")
- }
- x.Target = new(ChainTarget)
- if err := x.Target.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Chain
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Chain")
- }
- x.Chain = data
- }
- }
- return nil
-}
-func (x *AddChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
- if x != nil {
- return x.Target
- }
- return nil
-}
-func (x *AddChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) {
- x.Target = v
-}
-func (x *AddChainLocalOverrideRequest_Body) GetChain() []byte {
- if x != nil {
- return x.Chain
- }
- return nil
-}
-func (x *AddChainLocalOverrideRequest_Body) SetChain(v []byte) {
- x.Chain = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"target\":"
- out.RawString(prefix)
- x.Target.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chain\":"
- out.RawString(prefix)
- if x.Chain != nil {
- out.Base64Bytes(x.Chain)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "target":
- {
- var f *ChainTarget
- f = new(ChainTarget)
- f.UnmarshalEasyJSON(in)
- x.Target = f
- }
- case "chain":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Chain = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddChainLocalOverrideRequest struct {
- Body *AddChainLocalOverrideRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddChainLocalOverrideRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideRequest)(nil)
- _ json.Marshaler = (*AddChainLocalOverrideRequest)(nil)
- _ json.Unmarshaler = (*AddChainLocalOverrideRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *AddChainLocalOverrideRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *AddChainLocalOverrideRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *AddChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *AddChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(AddChainLocalOverrideRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *AddChainLocalOverrideRequest) GetBody() *AddChainLocalOverrideRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *AddChainLocalOverrideRequest) SetBody(v *AddChainLocalOverrideRequest_Body) {
- x.Body = v
-}
-func (x *AddChainLocalOverrideRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *AddChainLocalOverrideRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddChainLocalOverrideRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddChainLocalOverrideRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *AddChainLocalOverrideRequest_Body
- f = new(AddChainLocalOverrideRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddChainLocalOverrideResponse_Body struct {
- ChainId []byte `json:"chainId"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddChainLocalOverrideResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideResponse_Body)(nil)
- _ json.Marshaler = (*AddChainLocalOverrideResponse_Body)(nil)
- _ json.Unmarshaler = (*AddChainLocalOverrideResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *AddChainLocalOverrideResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.BytesSize(1, x.ChainId)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *AddChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if len(x.ChainId) != 0 {
- mm.AppendBytes(1, x.ChainId)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // ChainId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ChainId")
- }
- x.ChainId = data
- }
- }
- return nil
-}
-func (x *AddChainLocalOverrideResponse_Body) GetChainId() []byte {
- if x != nil {
- return x.ChainId
- }
- return nil
-}
-func (x *AddChainLocalOverrideResponse_Body) SetChainId(v []byte) {
- x.ChainId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chainId\":"
- out.RawString(prefix)
- if x.ChainId != nil {
- out.Base64Bytes(x.ChainId)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "chainId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ChainId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddChainLocalOverrideResponse struct {
- Body *AddChainLocalOverrideResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddChainLocalOverrideResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideResponse)(nil)
- _ json.Marshaler = (*AddChainLocalOverrideResponse)(nil)
- _ json.Unmarshaler = (*AddChainLocalOverrideResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *AddChainLocalOverrideResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *AddChainLocalOverrideResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *AddChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *AddChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(AddChainLocalOverrideResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *AddChainLocalOverrideResponse) GetBody() *AddChainLocalOverrideResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *AddChainLocalOverrideResponse) SetBody(v *AddChainLocalOverrideResponse_Body) {
- x.Body = v
-}
-func (x *AddChainLocalOverrideResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *AddChainLocalOverrideResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddChainLocalOverrideResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddChainLocalOverrideResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *AddChainLocalOverrideResponse_Body
- f = new(AddChainLocalOverrideResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetChainLocalOverrideRequest_Body struct {
- Target *ChainTarget `json:"target"`
- ChainId []byte `json:"chainId"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetChainLocalOverrideRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideRequest_Body)(nil)
- _ json.Marshaler = (*GetChainLocalOverrideRequest_Body)(nil)
- _ json.Unmarshaler = (*GetChainLocalOverrideRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *GetChainLocalOverrideRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Target)
- size += proto.BytesSize(2, x.ChainId)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Target != nil {
- x.Target.EmitProtobuf(mm.AppendMessage(1))
- }
- if len(x.ChainId) != 0 {
- mm.AppendBytes(2, x.ChainId)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Target
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Target")
- }
- x.Target = new(ChainTarget)
- if err := x.Target.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // ChainId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ChainId")
- }
- x.ChainId = data
- }
- }
- return nil
-}
-func (x *GetChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
- if x != nil {
- return x.Target
- }
- return nil
-}
-func (x *GetChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) {
- x.Target = v
-}
-func (x *GetChainLocalOverrideRequest_Body) GetChainId() []byte {
- if x != nil {
- return x.ChainId
- }
- return nil
-}
-func (x *GetChainLocalOverrideRequest_Body) SetChainId(v []byte) {
- x.ChainId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"target\":"
- out.RawString(prefix)
- x.Target.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chainId\":"
- out.RawString(prefix)
- if x.ChainId != nil {
- out.Base64Bytes(x.ChainId)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "target":
- {
- var f *ChainTarget
- f = new(ChainTarget)
- f.UnmarshalEasyJSON(in)
- x.Target = f
- }
- case "chainId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ChainId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetChainLocalOverrideRequest struct {
- Body *GetChainLocalOverrideRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetChainLocalOverrideRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideRequest)(nil)
- _ json.Marshaler = (*GetChainLocalOverrideRequest)(nil)
- _ json.Unmarshaler = (*GetChainLocalOverrideRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *GetChainLocalOverrideRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *GetChainLocalOverrideRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *GetChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetChainLocalOverrideRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetChainLocalOverrideRequest) GetBody() *GetChainLocalOverrideRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetChainLocalOverrideRequest) SetBody(v *GetChainLocalOverrideRequest_Body) {
- x.Body = v
-}
-func (x *GetChainLocalOverrideRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetChainLocalOverrideRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetChainLocalOverrideRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetChainLocalOverrideRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetChainLocalOverrideRequest_Body
- f = new(GetChainLocalOverrideRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetChainLocalOverrideResponse_Body struct {
- Chain []byte `json:"chain"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetChainLocalOverrideResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideResponse_Body)(nil)
- _ json.Marshaler = (*GetChainLocalOverrideResponse_Body)(nil)
- _ json.Unmarshaler = (*GetChainLocalOverrideResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *GetChainLocalOverrideResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.BytesSize(1, x.Chain)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if len(x.Chain) != 0 {
- mm.AppendBytes(1, x.Chain)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Chain
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Chain")
- }
- x.Chain = data
- }
- }
- return nil
-}
-func (x *GetChainLocalOverrideResponse_Body) GetChain() []byte {
- if x != nil {
- return x.Chain
- }
- return nil
-}
-func (x *GetChainLocalOverrideResponse_Body) SetChain(v []byte) {
- x.Chain = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chain\":"
- out.RawString(prefix)
- if x.Chain != nil {
- out.Base64Bytes(x.Chain)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "chain":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Chain = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetChainLocalOverrideResponse struct {
- Body *GetChainLocalOverrideResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetChainLocalOverrideResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideResponse)(nil)
- _ json.Marshaler = (*GetChainLocalOverrideResponse)(nil)
- _ json.Unmarshaler = (*GetChainLocalOverrideResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *GetChainLocalOverrideResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *GetChainLocalOverrideResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *GetChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetChainLocalOverrideResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetChainLocalOverrideResponse) GetBody() *GetChainLocalOverrideResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetChainLocalOverrideResponse) SetBody(v *GetChainLocalOverrideResponse_Body) {
- x.Body = v
-}
-func (x *GetChainLocalOverrideResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetChainLocalOverrideResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetChainLocalOverrideResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetChainLocalOverrideResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetChainLocalOverrideResponse_Body
- f = new(GetChainLocalOverrideResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListChainLocalOverridesRequest_Body struct {
- Target *ChainTarget `json:"target"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListChainLocalOverridesRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesRequest_Body)(nil)
- _ json.Marshaler = (*ListChainLocalOverridesRequest_Body)(nil)
- _ json.Unmarshaler = (*ListChainLocalOverridesRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListChainLocalOverridesRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Target)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListChainLocalOverridesRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListChainLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Target != nil {
- x.Target.EmitProtobuf(mm.AppendMessage(1))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListChainLocalOverridesRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Target
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Target")
- }
- x.Target = new(ChainTarget)
- if err := x.Target.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListChainLocalOverridesRequest_Body) GetTarget() *ChainTarget {
- if x != nil {
- return x.Target
- }
- return nil
-}
-func (x *ListChainLocalOverridesRequest_Body) SetTarget(v *ChainTarget) {
- x.Target = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListChainLocalOverridesRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListChainLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"target\":"
- out.RawString(prefix)
- x.Target.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListChainLocalOverridesRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListChainLocalOverridesRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "target":
- {
- var f *ChainTarget
- f = new(ChainTarget)
- f.UnmarshalEasyJSON(in)
- x.Target = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListChainLocalOverridesRequest struct {
- Body *ListChainLocalOverridesRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListChainLocalOverridesRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesRequest)(nil)
- _ json.Marshaler = (*ListChainLocalOverridesRequest)(nil)
- _ json.Unmarshaler = (*ListChainLocalOverridesRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListChainLocalOverridesRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListChainLocalOverridesRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListChainLocalOverridesRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListChainLocalOverridesRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListChainLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListChainLocalOverridesRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListChainLocalOverridesRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListChainLocalOverridesRequest) GetBody() *ListChainLocalOverridesRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListChainLocalOverridesRequest) SetBody(v *ListChainLocalOverridesRequest_Body) {
- x.Body = v
-}
-func (x *ListChainLocalOverridesRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListChainLocalOverridesRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListChainLocalOverridesRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListChainLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListChainLocalOverridesRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListChainLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListChainLocalOverridesRequest_Body
- f = new(ListChainLocalOverridesRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListChainLocalOverridesResponse_Body struct {
- Chains [][]byte `json:"chains"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListChainLocalOverridesResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesResponse_Body)(nil)
- _ json.Marshaler = (*ListChainLocalOverridesResponse_Body)(nil)
- _ json.Unmarshaler = (*ListChainLocalOverridesResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListChainLocalOverridesResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.RepeatedBytesSize(1, x.Chains)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListChainLocalOverridesResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListChainLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- for j := range x.Chains {
- mm.AppendBytes(1, x.Chains[j])
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListChainLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Chains
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Chains")
- }
- x.Chains = append(x.Chains, data)
- }
- }
- return nil
-}
-func (x *ListChainLocalOverridesResponse_Body) GetChains() [][]byte {
- if x != nil {
- return x.Chains
- }
- return nil
-}
-func (x *ListChainLocalOverridesResponse_Body) SetChains(v [][]byte) {
- x.Chains = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListChainLocalOverridesResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListChainLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chains\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Chains {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Chains[i] != nil {
- out.Base64Bytes(x.Chains[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListChainLocalOverridesResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListChainLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "chains":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Chains = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListChainLocalOverridesResponse struct {
- Body *ListChainLocalOverridesResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListChainLocalOverridesResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesResponse)(nil)
- _ json.Marshaler = (*ListChainLocalOverridesResponse)(nil)
- _ json.Unmarshaler = (*ListChainLocalOverridesResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListChainLocalOverridesResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListChainLocalOverridesResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListChainLocalOverridesResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListChainLocalOverridesResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListChainLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListChainLocalOverridesResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListChainLocalOverridesResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListChainLocalOverridesResponse) GetBody() *ListChainLocalOverridesResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListChainLocalOverridesResponse) SetBody(v *ListChainLocalOverridesResponse_Body) {
- x.Body = v
-}
-func (x *ListChainLocalOverridesResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListChainLocalOverridesResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListChainLocalOverridesResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListChainLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListChainLocalOverridesResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListChainLocalOverridesResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListChainLocalOverridesResponse_Body
- f = new(ListChainLocalOverridesResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListTargetsLocalOverridesRequest_Body struct {
- ChainName string `json:"chainName"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
- _ json.Marshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
- _ json.Unmarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListTargetsLocalOverridesRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.StringSize(1, x.ChainName)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListTargetsLocalOverridesRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListTargetsLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if len(x.ChainName) != 0 {
- mm.AppendString(1, x.ChainName)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ChainName
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ChainName")
- }
- x.ChainName = data
- }
- }
- return nil
-}
-func (x *ListTargetsLocalOverridesRequest_Body) GetChainName() string {
- if x != nil {
- return x.ChainName
- }
- return ""
-}
-func (x *ListTargetsLocalOverridesRequest_Body) SetChainName(v string) {
- x.ChainName = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListTargetsLocalOverridesRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListTargetsLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chainName\":"
- out.RawString(prefix)
- out.String(x.ChainName)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "chainName":
- {
- var f string
- f = in.String()
- x.ChainName = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListTargetsLocalOverridesRequest struct {
- Body *ListTargetsLocalOverridesRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesRequest)(nil)
- _ json.Marshaler = (*ListTargetsLocalOverridesRequest)(nil)
- _ json.Unmarshaler = (*ListTargetsLocalOverridesRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListTargetsLocalOverridesRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListTargetsLocalOverridesRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListTargetsLocalOverridesRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListTargetsLocalOverridesRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListTargetsLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListTargetsLocalOverridesRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListTargetsLocalOverridesRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListTargetsLocalOverridesRequest) GetBody() *ListTargetsLocalOverridesRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListTargetsLocalOverridesRequest) SetBody(v *ListTargetsLocalOverridesRequest_Body) {
- x.Body = v
-}
-func (x *ListTargetsLocalOverridesRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListTargetsLocalOverridesRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListTargetsLocalOverridesRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListTargetsLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListTargetsLocalOverridesRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListTargetsLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListTargetsLocalOverridesRequest_Body
- f = new(ListTargetsLocalOverridesRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListTargetsLocalOverridesResponse_Body struct {
- Targets []ChainTarget `json:"targets"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
- _ json.Marshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
- _ json.Unmarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListTargetsLocalOverridesResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- for i := range x.Targets {
- size += proto.NestedStructureSizeUnchecked(1, &x.Targets[i])
- }
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListTargetsLocalOverridesResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListTargetsLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- for i := range x.Targets {
- x.Targets[i].EmitProtobuf(mm.AppendMessage(1))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Targets
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Targets")
- }
- x.Targets = append(x.Targets, ChainTarget{})
- ff := &x.Targets[len(x.Targets)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []ChainTarget {
- if x != nil {
- return x.Targets
- }
- return nil
-}
-func (x *ListTargetsLocalOverridesResponse_Body) SetTargets(v []ChainTarget) {
- x.Targets = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListTargetsLocalOverridesResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListTargetsLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"targets\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Targets {
- if i != 0 {
- out.RawByte(',')
- }
- x.Targets[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "targets":
- {
- var f ChainTarget
- var list []ChainTarget
- in.Delim('[')
- for !in.IsDelim(']') {
- f = ChainTarget{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Targets = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListTargetsLocalOverridesResponse struct {
- Body *ListTargetsLocalOverridesResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesResponse)(nil)
- _ json.Marshaler = (*ListTargetsLocalOverridesResponse)(nil)
- _ json.Unmarshaler = (*ListTargetsLocalOverridesResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListTargetsLocalOverridesResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListTargetsLocalOverridesResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListTargetsLocalOverridesResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListTargetsLocalOverridesResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListTargetsLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListTargetsLocalOverridesResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListTargetsLocalOverridesResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListTargetsLocalOverridesResponse) GetBody() *ListTargetsLocalOverridesResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListTargetsLocalOverridesResponse) SetBody(v *ListTargetsLocalOverridesResponse_Body) {
- x.Body = v
-}
-func (x *ListTargetsLocalOverridesResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListTargetsLocalOverridesResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListTargetsLocalOverridesResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListTargetsLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListTargetsLocalOverridesResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListTargetsLocalOverridesResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListTargetsLocalOverridesResponse_Body
- f = new(ListTargetsLocalOverridesResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveChainLocalOverrideRequest_Body struct {
- Target *ChainTarget `json:"target"`
- ChainId []byte `json:"chainId"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
- _ json.Marshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
- _ json.Unmarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverrideRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Target)
- size += proto.BytesSize(2, x.ChainId)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Target != nil {
- x.Target.EmitProtobuf(mm.AppendMessage(1))
- }
- if len(x.ChainId) != 0 {
- mm.AppendBytes(2, x.ChainId)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Target
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Target")
- }
- x.Target = new(ChainTarget)
- if err := x.Target.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // ChainId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ChainId")
- }
- x.ChainId = data
- }
- }
- return nil
-}
-func (x *RemoveChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
- if x != nil {
- return x.Target
- }
- return nil
-}
-func (x *RemoveChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) {
- x.Target = v
-}
-func (x *RemoveChainLocalOverrideRequest_Body) GetChainId() []byte {
- if x != nil {
- return x.ChainId
- }
- return nil
-}
-func (x *RemoveChainLocalOverrideRequest_Body) SetChainId(v []byte) {
- x.ChainId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"target\":"
- out.RawString(prefix)
- x.Target.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"chainId\":"
- out.RawString(prefix)
- if x.ChainId != nil {
- out.Base64Bytes(x.ChainId)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "target":
- {
- var f *ChainTarget
- f = new(ChainTarget)
- f.UnmarshalEasyJSON(in)
- x.Target = f
- }
- case "chainId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ChainId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveChainLocalOverrideRequest struct {
- Body *RemoveChainLocalOverrideRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideRequest)(nil)
- _ json.Marshaler = (*RemoveChainLocalOverrideRequest)(nil)
- _ json.Unmarshaler = (*RemoveChainLocalOverrideRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverrideRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveChainLocalOverrideRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(RemoveChainLocalOverrideRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveChainLocalOverrideRequest) GetBody() *RemoveChainLocalOverrideRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *RemoveChainLocalOverrideRequest) SetBody(v *RemoveChainLocalOverrideRequest_Body) {
- x.Body = v
-}
-func (x *RemoveChainLocalOverrideRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *RemoveChainLocalOverrideRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveChainLocalOverrideRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveChainLocalOverrideRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *RemoveChainLocalOverrideRequest_Body
- f = new(RemoveChainLocalOverrideRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveChainLocalOverrideResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
- _ json.Marshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
- _ json.Unmarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverrideResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveChainLocalOverrideResponse struct {
- Body *RemoveChainLocalOverrideResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideResponse)(nil)
- _ json.Marshaler = (*RemoveChainLocalOverrideResponse)(nil)
- _ json.Unmarshaler = (*RemoveChainLocalOverrideResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverrideResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveChainLocalOverrideResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(RemoveChainLocalOverrideResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveChainLocalOverrideResponse) GetBody() *RemoveChainLocalOverrideResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *RemoveChainLocalOverrideResponse) SetBody(v *RemoveChainLocalOverrideResponse_Body) {
- x.Body = v
-}
-func (x *RemoveChainLocalOverrideResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *RemoveChainLocalOverrideResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveChainLocalOverrideResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveChainLocalOverrideResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *RemoveChainLocalOverrideResponse_Body
- f = new(RemoveChainLocalOverrideResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveChainLocalOverridesByTargetRequest_Body struct {
- Target *ChainTarget `json:"target"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
- _ json.Marshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
- _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Target)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Target != nil {
- x.Target.EmitProtobuf(mm.AppendMessage(1))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Target
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Target")
- }
- x.Target = new(ChainTarget)
- if err := x.Target.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) GetTarget() *ChainTarget {
- if x != nil {
- return x.Target
- }
- return nil
-}
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) SetTarget(v *ChainTarget) {
- x.Target = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"target\":"
- out.RawString(prefix)
- x.Target.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "target":
- {
- var f *ChainTarget
- f = new(ChainTarget)
- f.UnmarshalEasyJSON(in)
- x.Target = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveChainLocalOverridesByTargetRequest struct {
- Body *RemoveChainLocalOverridesByTargetRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
- _ json.Marshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
- _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverridesByTargetRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveChainLocalOverridesByTargetRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveChainLocalOverridesByTargetRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(RemoveChainLocalOverridesByTargetRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveChainLocalOverridesByTargetRequest) GetBody() *RemoveChainLocalOverridesByTargetRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *RemoveChainLocalOverridesByTargetRequest) SetBody(v *RemoveChainLocalOverridesByTargetRequest_Body) {
- x.Body = v
-}
-func (x *RemoveChainLocalOverridesByTargetRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *RemoveChainLocalOverridesByTargetRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveChainLocalOverridesByTargetRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveChainLocalOverridesByTargetRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *RemoveChainLocalOverridesByTargetRequest_Body
- f = new(RemoveChainLocalOverridesByTargetRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveChainLocalOverridesByTargetResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
- _ json.Marshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
- _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveChainLocalOverridesByTargetResponse struct {
- Body *RemoveChainLocalOverridesByTargetResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
- _ json.Marshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
- _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RemoveChainLocalOverridesByTargetResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RemoveChainLocalOverridesByTargetResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RemoveChainLocalOverridesByTargetResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(RemoveChainLocalOverridesByTargetResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveChainLocalOverridesByTargetResponse) GetBody() *RemoveChainLocalOverridesByTargetResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *RemoveChainLocalOverridesByTargetResponse) SetBody(v *RemoveChainLocalOverridesByTargetResponse_Body) {
- x.Body = v
-}
-func (x *RemoveChainLocalOverridesByTargetResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *RemoveChainLocalOverridesByTargetResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveChainLocalOverridesByTargetResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveChainLocalOverridesByTargetResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *RemoveChainLocalOverridesByTargetResponse_Body
- f = new(RemoveChainLocalOverridesByTargetResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SealWriteCacheRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
- IgnoreErrors bool `json:"ignoreErrors"`
- Async bool `json:"async"`
- RestoreMode bool `json:"restoreMode"`
- Shrink bool `json:"shrink"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SealWriteCacheRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*SealWriteCacheRequest_Body)(nil)
- _ json.Marshaler = (*SealWriteCacheRequest_Body)(nil)
- _ json.Unmarshaler = (*SealWriteCacheRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *SealWriteCacheRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.RepeatedBytesSize(1, x.Shard_ID)
- size += proto.BoolSize(2, x.IgnoreErrors)
- size += proto.BoolSize(3, x.Async)
- size += proto.BoolSize(4, x.RestoreMode)
- size += proto.BoolSize(5, x.Shrink)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SealWriteCacheRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *SealWriteCacheRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- for j := range x.Shard_ID {
- mm.AppendBytes(1, x.Shard_ID[j])
- }
- if x.IgnoreErrors {
- mm.AppendBool(2, x.IgnoreErrors)
- }
- if x.Async {
- mm.AppendBool(3, x.Async)
- }
- if x.RestoreMode {
- mm.AppendBool(4, x.RestoreMode)
- }
- if x.Shrink {
- mm.AppendBool(5, x.Shrink)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SealWriteCacheRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SealWriteCacheRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- case 2: // IgnoreErrors
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors")
- }
- x.IgnoreErrors = data
- case 3: // Async
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Async")
- }
- x.Async = data
- case 4: // RestoreMode
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "RestoreMode")
- }
- x.RestoreMode = data
- case 5: // Shrink
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shrink")
- }
- x.Shrink = data
- }
- }
- return nil
-}
-func (x *SealWriteCacheRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *SealWriteCacheRequest_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-func (x *SealWriteCacheRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-func (x *SealWriteCacheRequest_Body) SetIgnoreErrors(v bool) {
- x.IgnoreErrors = v
-}
-func (x *SealWriteCacheRequest_Body) GetAsync() bool {
- if x != nil {
- return x.Async
- }
- return false
-}
-func (x *SealWriteCacheRequest_Body) SetAsync(v bool) {
- x.Async = v
-}
-func (x *SealWriteCacheRequest_Body) GetRestoreMode() bool {
- if x != nil {
- return x.RestoreMode
- }
- return false
-}
-func (x *SealWriteCacheRequest_Body) SetRestoreMode(v bool) {
- x.RestoreMode = v
-}
-func (x *SealWriteCacheRequest_Body) GetShrink() bool {
- if x != nil {
- return x.Shrink
- }
- return false
-}
-func (x *SealWriteCacheRequest_Body) SetShrink(v bool) {
- x.Shrink = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SealWriteCacheRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SealWriteCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"ignoreErrors\":"
- out.RawString(prefix)
- out.Bool(x.IgnoreErrors)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"async\":"
- out.RawString(prefix)
- out.Bool(x.Async)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"restoreMode\":"
- out.RawString(prefix)
- out.Bool(x.RestoreMode)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shrink\":"
- out.RawString(prefix)
- out.Bool(x.Shrink)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SealWriteCacheRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SealWriteCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- case "ignoreErrors":
- {
- var f bool
- f = in.Bool()
- x.IgnoreErrors = f
- }
- case "async":
- {
- var f bool
- f = in.Bool()
- x.Async = f
- }
- case "restoreMode":
- {
- var f bool
- f = in.Bool()
- x.RestoreMode = f
- }
- case "shrink":
- {
- var f bool
- f = in.Bool()
- x.Shrink = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SealWriteCacheRequest struct {
- Body *SealWriteCacheRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SealWriteCacheRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*SealWriteCacheRequest)(nil)
- _ json.Marshaler = (*SealWriteCacheRequest)(nil)
- _ json.Unmarshaler = (*SealWriteCacheRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *SealWriteCacheRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *SealWriteCacheRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *SealWriteCacheRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SealWriteCacheRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *SealWriteCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SealWriteCacheRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SealWriteCacheRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(SealWriteCacheRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *SealWriteCacheRequest) GetBody() *SealWriteCacheRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *SealWriteCacheRequest) SetBody(v *SealWriteCacheRequest_Body) {
- x.Body = v
-}
-func (x *SealWriteCacheRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *SealWriteCacheRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SealWriteCacheRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SealWriteCacheRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SealWriteCacheRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SealWriteCacheRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *SealWriteCacheRequest_Body
- f = new(SealWriteCacheRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SealWriteCacheResponse_Body_Status struct {
- Shard_ID []byte `json:"shardID"`
- Success bool `json:"success"`
- Error string `json:"error"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SealWriteCacheResponse_Body_Status)(nil)
- _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse_Body_Status)(nil)
- _ json.Marshaler = (*SealWriteCacheResponse_Body_Status)(nil)
- _ json.Unmarshaler = (*SealWriteCacheResponse_Body_Status)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *SealWriteCacheResponse_Body_Status) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.BytesSize(1, x.Shard_ID)
- size += proto.BoolSize(2, x.Success)
- size += proto.StringSize(3, x.Error)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SealWriteCacheResponse_Body_Status) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *SealWriteCacheResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if len(x.Shard_ID) != 0 {
- mm.AppendBytes(1, x.Shard_ID)
- }
- if x.Success {
- mm.AppendBool(2, x.Success)
- }
- if len(x.Error) != 0 {
- mm.AppendString(3, x.Error)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SealWriteCacheResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse_Body_Status")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = data
- case 2: // Success
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Success")
- }
- x.Success = data
- case 3: // Error
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Error")
- }
- x.Error = data
- }
- }
- return nil
-}
-func (x *SealWriteCacheResponse_Body_Status) GetShard_ID() []byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *SealWriteCacheResponse_Body_Status) SetShard_ID(v []byte) {
- x.Shard_ID = v
-}
-func (x *SealWriteCacheResponse_Body_Status) GetSuccess() bool {
- if x != nil {
- return x.Success
- }
- return false
-}
-func (x *SealWriteCacheResponse_Body_Status) SetSuccess(v bool) {
- x.Success = v
-}
-func (x *SealWriteCacheResponse_Body_Status) GetError() string {
- if x != nil {
- return x.Error
- }
- return ""
-}
-func (x *SealWriteCacheResponse_Body_Status) SetError(v string) {
- x.Error = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SealWriteCacheResponse_Body_Status) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SealWriteCacheResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- if x.Shard_ID != nil {
- out.Base64Bytes(x.Shard_ID)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"success\":"
- out.RawString(prefix)
- out.Bool(x.Success)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"error\":"
- out.RawString(prefix)
- out.String(x.Error)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SealWriteCacheResponse_Body_Status) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SealWriteCacheResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Shard_ID = f
- }
- case "success":
- {
- var f bool
- f = in.Bool()
- x.Success = f
- }
- case "error":
- {
- var f string
- f = in.String()
- x.Error = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SealWriteCacheResponse_Body struct {
- Results []SealWriteCacheResponse_Body_Status `json:"results"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SealWriteCacheResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse_Body)(nil)
- _ json.Marshaler = (*SealWriteCacheResponse_Body)(nil)
- _ json.Unmarshaler = (*SealWriteCacheResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *SealWriteCacheResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- for i := range x.Results {
- size += proto.NestedStructureSizeUnchecked(1, &x.Results[i])
- }
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SealWriteCacheResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *SealWriteCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- for i := range x.Results {
- x.Results[i].EmitProtobuf(mm.AppendMessage(1))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SealWriteCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Results
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Results")
- }
- x.Results = append(x.Results, SealWriteCacheResponse_Body_Status{})
- ff := &x.Results[len(x.Results)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *SealWriteCacheResponse_Body) GetResults() []SealWriteCacheResponse_Body_Status {
- if x != nil {
- return x.Results
- }
- return nil
-}
-func (x *SealWriteCacheResponse_Body) SetResults(v []SealWriteCacheResponse_Body_Status) {
- x.Results = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SealWriteCacheResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SealWriteCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"results\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Results {
- if i != 0 {
- out.RawByte(',')
- }
- x.Results[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SealWriteCacheResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SealWriteCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "results":
- {
- var f SealWriteCacheResponse_Body_Status
- var list []SealWriteCacheResponse_Body_Status
- in.Delim('[')
- for !in.IsDelim(']') {
- f = SealWriteCacheResponse_Body_Status{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Results = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type SealWriteCacheResponse struct {
- Body *SealWriteCacheResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*SealWriteCacheResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse)(nil)
- _ json.Marshaler = (*SealWriteCacheResponse)(nil)
- _ json.Unmarshaler = (*SealWriteCacheResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *SealWriteCacheResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *SealWriteCacheResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *SealWriteCacheResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *SealWriteCacheResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *SealWriteCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *SealWriteCacheResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(SealWriteCacheResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *SealWriteCacheResponse) GetBody() *SealWriteCacheResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *SealWriteCacheResponse) SetBody(v *SealWriteCacheResponse_Body) {
- x.Body = v
-}
-func (x *SealWriteCacheResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *SealWriteCacheResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *SealWriteCacheResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *SealWriteCacheResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *SealWriteCacheResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *SealWriteCacheResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *SealWriteCacheResponse_Body
- f = new(SealWriteCacheResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DetachShardsRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DetachShardsRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*DetachShardsRequest_Body)(nil)
- _ json.Marshaler = (*DetachShardsRequest_Body)(nil)
- _ json.Unmarshaler = (*DetachShardsRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DetachShardsRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.RepeatedBytesSize(1, x.Shard_ID)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DetachShardsRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *DetachShardsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- for j := range x.Shard_ID {
- mm.AppendBytes(1, x.Shard_ID[j])
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DetachShardsRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DetachShardsRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- }
- }
- return nil
-}
-func (x *DetachShardsRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *DetachShardsRequest_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DetachShardsRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DetachShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DetachShardsRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DetachShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DetachShardsRequest struct {
- Body *DetachShardsRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DetachShardsRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*DetachShardsRequest)(nil)
- _ json.Marshaler = (*DetachShardsRequest)(nil)
- _ json.Unmarshaler = (*DetachShardsRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DetachShardsRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *DetachShardsRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *DetachShardsRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DetachShardsRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *DetachShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DetachShardsRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DetachShardsRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(DetachShardsRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *DetachShardsRequest) GetBody() *DetachShardsRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *DetachShardsRequest) SetBody(v *DetachShardsRequest_Body) {
- x.Body = v
-}
-func (x *DetachShardsRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *DetachShardsRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DetachShardsRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DetachShardsRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DetachShardsRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DetachShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *DetachShardsRequest_Body
- f = new(DetachShardsRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DetachShardsResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DetachShardsResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*DetachShardsResponse_Body)(nil)
- _ json.Marshaler = (*DetachShardsResponse_Body)(nil)
- _ json.Unmarshaler = (*DetachShardsResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DetachShardsResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DetachShardsResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *DetachShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DetachShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DetachShardsResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DetachShardsResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DetachShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DetachShardsResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DetachShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type DetachShardsResponse struct {
- Body *DetachShardsResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*DetachShardsResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*DetachShardsResponse)(nil)
- _ json.Marshaler = (*DetachShardsResponse)(nil)
- _ json.Unmarshaler = (*DetachShardsResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DetachShardsResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *DetachShardsResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *DetachShardsResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *DetachShardsResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *DetachShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *DetachShardsResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "DetachShardsResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(DetachShardsResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *DetachShardsResponse) GetBody() *DetachShardsResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *DetachShardsResponse) SetBody(v *DetachShardsResponse_Body) {
- x.Body = v
-}
-func (x *DetachShardsResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *DetachShardsResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *DetachShardsResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *DetachShardsResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *DetachShardsResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *DetachShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *DetachShardsResponse_Body
- f = new(DetachShardsResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StartShardRebuildRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
- TargetFillPercent uint32 `json:"targetFillPercent"`
- ConcurrencyLimit uint32 `json:"concurrencyLimit"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StartShardRebuildRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest_Body)(nil)
- _ json.Marshaler = (*StartShardRebuildRequest_Body)(nil)
- _ json.Unmarshaler = (*StartShardRebuildRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *StartShardRebuildRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.RepeatedBytesSize(1, x.Shard_ID)
- size += proto.UInt32Size(2, x.TargetFillPercent)
- size += proto.UInt32Size(3, x.ConcurrencyLimit)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardRebuildRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *StartShardRebuildRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- for j := range x.Shard_ID {
- mm.AppendBytes(1, x.Shard_ID[j])
- }
- if x.TargetFillPercent != 0 {
- mm.AppendUint32(2, x.TargetFillPercent)
- }
- if x.ConcurrencyLimit != 0 {
- mm.AppendUint32(3, x.ConcurrencyLimit)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StartShardRebuildRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- case 2: // TargetFillPercent
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TargetFillPercent")
- }
- x.TargetFillPercent = data
- case 3: // ConcurrencyLimit
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ConcurrencyLimit")
- }
- x.ConcurrencyLimit = data
- }
- }
- return nil
-}
-func (x *StartShardRebuildRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *StartShardRebuildRequest_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-func (x *StartShardRebuildRequest_Body) GetTargetFillPercent() uint32 {
- if x != nil {
- return x.TargetFillPercent
- }
- return 0
-}
-func (x *StartShardRebuildRequest_Body) SetTargetFillPercent(v uint32) {
- x.TargetFillPercent = v
-}
-func (x *StartShardRebuildRequest_Body) GetConcurrencyLimit() uint32 {
- if x != nil {
- return x.ConcurrencyLimit
- }
- return 0
-}
-func (x *StartShardRebuildRequest_Body) SetConcurrencyLimit(v uint32) {
- x.ConcurrencyLimit = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StartShardRebuildRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StartShardRebuildRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"targetFillPercent\":"
- out.RawString(prefix)
- out.Uint32(x.TargetFillPercent)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"concurrencyLimit\":"
- out.RawString(prefix)
- out.Uint32(x.ConcurrencyLimit)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StartShardRebuildRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- case "targetFillPercent":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.TargetFillPercent = f
- }
- case "concurrencyLimit":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.ConcurrencyLimit = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StartShardRebuildRequest struct {
- Body *StartShardRebuildRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StartShardRebuildRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest)(nil)
- _ json.Marshaler = (*StartShardRebuildRequest)(nil)
- _ json.Unmarshaler = (*StartShardRebuildRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *StartShardRebuildRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *StartShardRebuildRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *StartShardRebuildRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardRebuildRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *StartShardRebuildRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StartShardRebuildRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(StartShardRebuildRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *StartShardRebuildRequest) GetBody() *StartShardRebuildRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *StartShardRebuildRequest) SetBody(v *StartShardRebuildRequest_Body) {
- x.Body = v
-}
-func (x *StartShardRebuildRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *StartShardRebuildRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StartShardRebuildRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StartShardRebuildRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StartShardRebuildRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StartShardRebuildRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *StartShardRebuildRequest_Body
- f = new(StartShardRebuildRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StartShardRebuildResponse_Body_Status struct {
- Shard_ID []byte `json:"shardID"`
- Success bool `json:"success"`
- Error string `json:"error"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
- _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
- _ json.Marshaler = (*StartShardRebuildResponse_Body_Status)(nil)
- _ json.Unmarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *StartShardRebuildResponse_Body_Status) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.BytesSize(1, x.Shard_ID)
- size += proto.BoolSize(2, x.Success)
- size += proto.StringSize(3, x.Error)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardRebuildResponse_Body_Status) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *StartShardRebuildResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if len(x.Shard_ID) != 0 {
- mm.AppendBytes(1, x.Shard_ID)
- }
- if x.Success {
- mm.AppendBool(2, x.Success)
- }
- if len(x.Error) != 0 {
- mm.AppendString(3, x.Error)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StartShardRebuildResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body_Status")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = data
- case 2: // Success
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Success")
- }
- x.Success = data
- case 3: // Error
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Error")
- }
- x.Error = data
- }
- }
- return nil
-}
-func (x *StartShardRebuildResponse_Body_Status) GetShard_ID() []byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *StartShardRebuildResponse_Body_Status) SetShard_ID(v []byte) {
- x.Shard_ID = v
-}
-func (x *StartShardRebuildResponse_Body_Status) GetSuccess() bool {
- if x != nil {
- return x.Success
- }
- return false
-}
-func (x *StartShardRebuildResponse_Body_Status) SetSuccess(v bool) {
- x.Success = v
-}
-func (x *StartShardRebuildResponse_Body_Status) GetError() string {
- if x != nil {
- return x.Error
- }
- return ""
-}
-func (x *StartShardRebuildResponse_Body_Status) SetError(v string) {
- x.Error = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StartShardRebuildResponse_Body_Status) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StartShardRebuildResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- if x.Shard_ID != nil {
- out.Base64Bytes(x.Shard_ID)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"success\":"
- out.RawString(prefix)
- out.Bool(x.Success)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"error\":"
- out.RawString(prefix)
- out.String(x.Error)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StartShardRebuildResponse_Body_Status) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StartShardRebuildResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Shard_ID = f
- }
- case "success":
- {
- var f bool
- f = in.Bool()
- x.Success = f
- }
- case "error":
- {
- var f string
- f = in.String()
- x.Error = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StartShardRebuildResponse_Body struct {
- Results []StartShardRebuildResponse_Body_Status `json:"results"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body)(nil)
- _ json.Marshaler = (*StartShardRebuildResponse_Body)(nil)
- _ json.Unmarshaler = (*StartShardRebuildResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *StartShardRebuildResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- for i := range x.Results {
- size += proto.NestedStructureSizeUnchecked(1, &x.Results[i])
- }
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardRebuildResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *StartShardRebuildResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- for i := range x.Results {
- x.Results[i].EmitProtobuf(mm.AppendMessage(1))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StartShardRebuildResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Results
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Results")
- }
- x.Results = append(x.Results, StartShardRebuildResponse_Body_Status{})
- ff := &x.Results[len(x.Results)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *StartShardRebuildResponse_Body) GetResults() []StartShardRebuildResponse_Body_Status {
- if x != nil {
- return x.Results
- }
- return nil
-}
-func (x *StartShardRebuildResponse_Body) SetResults(v []StartShardRebuildResponse_Body_Status) {
- x.Results = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StartShardRebuildResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StartShardRebuildResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"results\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Results {
- if i != 0 {
- out.RawByte(',')
- }
- x.Results[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StartShardRebuildResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StartShardRebuildResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "results":
- {
- var f StartShardRebuildResponse_Body_Status
- var list []StartShardRebuildResponse_Body_Status
- in.Delim('[')
- for !in.IsDelim(']') {
- f = StartShardRebuildResponse_Body_Status{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Results = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type StartShardRebuildResponse struct {
- Body *StartShardRebuildResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*StartShardRebuildResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse)(nil)
- _ json.Marshaler = (*StartShardRebuildResponse)(nil)
- _ json.Unmarshaler = (*StartShardRebuildResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *StartShardRebuildResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *StartShardRebuildResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *StartShardRebuildResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *StartShardRebuildResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *StartShardRebuildResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *StartShardRebuildResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(StartShardRebuildResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *StartShardRebuildResponse) GetBody() *StartShardRebuildResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *StartShardRebuildResponse) SetBody(v *StartShardRebuildResponse_Body) {
- x.Body = v
-}
-func (x *StartShardRebuildResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *StartShardRebuildResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *StartShardRebuildResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *StartShardRebuildResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *StartShardRebuildResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *StartShardRebuildResponse_Body
- f = new(StartShardRebuildResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsForObjectRequest_Body struct {
- ObjectId string `json:"objectId"`
- ContainerId string `json:"containerId"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil)
- _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectRequest_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.StringSize(1, x.ObjectId)
- size += proto.StringSize(2, x.ContainerId)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if len(x.ObjectId) != 0 {
- mm.AppendString(1, x.ObjectId)
- }
- if len(x.ContainerId) != 0 {
- mm.AppendString(2, x.ContainerId)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ObjectId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ObjectId")
- }
- x.ObjectId = data
- case 2: // ContainerId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- }
- }
- return nil
-}
-func (x *ListShardsForObjectRequest_Body) GetObjectId() string {
- if x != nil {
- return x.ObjectId
- }
- return ""
-}
-func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) {
- x.ObjectId = v
-}
-func (x *ListShardsForObjectRequest_Body) GetContainerId() string {
- if x != nil {
- return x.ContainerId
- }
- return ""
-}
-func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) {
- x.ContainerId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"objectId\":"
- out.RawString(prefix)
- out.String(x.ObjectId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- out.String(x.ContainerId)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "objectId":
- {
- var f string
- f = in.String()
- x.ObjectId = f
- }
- case "containerId":
- {
- var f string
- f = in.String()
- x.ContainerId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsForObjectRequest struct {
- Body *ListShardsForObjectRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil)
- _ json.Marshaler = (*ListShardsForObjectRequest)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectRequest) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListShardsForObjectRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListShardsForObjectRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) {
- x.Body = v
-}
-func (x *ListShardsForObjectRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListShardsForObjectRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListShardsForObjectRequest_Body
- f = new(ListShardsForObjectRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsForObjectResponse_Body struct {
- Shard_ID [][]byte `json:"shardID"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil)
- _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectResponse_Body) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.RepeatedBytesSize(1, x.Shard_ID)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- for j := range x.Shard_ID {
- mm.AppendBytes(1, x.Shard_ID[j])
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = append(x.Shard_ID, data)
- }
- }
- return nil
-}
-func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) {
- x.Shard_ID = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Shard_ID {
- if i != 0 {
- out.RawByte(',')
- }
- if x.Shard_ID[i] != nil {
- out.Base64Bytes(x.Shard_ID[i])
- } else {
- out.String("")
- }
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- var list [][]byte
- in.Delim('[')
- for !in.IsDelim(']') {
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- list = append(list, f)
- in.WantComma()
- }
- x.Shard_ID = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ListShardsForObjectResponse struct {
- Body *ListShardsForObjectResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil)
- _ json.Marshaler = (*ListShardsForObjectResponse)(nil)
- _ json.Unmarshaler = (*ListShardsForObjectResponse)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *ListShardsForObjectResponse) StableSize() (size int) {
- if x == nil {
- return 0
- }
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *ListShardsForObjectResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ListShardsForObjectResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) {
- x.Body = v
-}
-func (x *ListShardsForObjectResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ListShardsForObjectResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ListShardsForObjectResponse_Body
- f = new(ListShardsForObjectResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
+func (x *StopShardEvacuationResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go
index 045662ccf..8afc6086a 100644
--- a/pkg/services/control/service_grpc.pb.go
+++ b/pkg/services/control/service_grpc.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.25.0
+// - protoc v3.21.9
// source: pkg/services/control/service.proto
package control
@@ -19,29 +19,18 @@ import (
const _ = grpc.SupportPackageIsVersion7
const (
- ControlService_HealthCheck_FullMethodName = "/control.ControlService/HealthCheck"
- ControlService_SetNetmapStatus_FullMethodName = "/control.ControlService/SetNetmapStatus"
- ControlService_GetNetmapStatus_FullMethodName = "/control.ControlService/GetNetmapStatus"
- ControlService_DropObjects_FullMethodName = "/control.ControlService/DropObjects"
- ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards"
- ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode"
- ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree"
- ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation"
- ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus"
- ControlService_ResetShardEvacuationStatus_FullMethodName = "/control.ControlService/ResetShardEvacuationStatus"
- ControlService_StopShardEvacuation_FullMethodName = "/control.ControlService/StopShardEvacuation"
- ControlService_FlushCache_FullMethodName = "/control.ControlService/FlushCache"
- ControlService_Doctor_FullMethodName = "/control.ControlService/Doctor"
- ControlService_AddChainLocalOverride_FullMethodName = "/control.ControlService/AddChainLocalOverride"
- ControlService_GetChainLocalOverride_FullMethodName = "/control.ControlService/GetChainLocalOverride"
- ControlService_ListChainLocalOverrides_FullMethodName = "/control.ControlService/ListChainLocalOverrides"
- ControlService_RemoveChainLocalOverride_FullMethodName = "/control.ControlService/RemoveChainLocalOverride"
- ControlService_RemoveChainLocalOverridesByTarget_FullMethodName = "/control.ControlService/RemoveChainLocalOverridesByTarget"
- ControlService_ListTargetsLocalOverrides_FullMethodName = "/control.ControlService/ListTargetsLocalOverrides"
- ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache"
- ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards"
- ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild"
- ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject"
+ ControlService_HealthCheck_FullMethodName = "/control.ControlService/HealthCheck"
+ ControlService_SetNetmapStatus_FullMethodName = "/control.ControlService/SetNetmapStatus"
+ ControlService_DropObjects_FullMethodName = "/control.ControlService/DropObjects"
+ ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards"
+ ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode"
+ ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree"
+ ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard"
+ ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation"
+ ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus"
+ ControlService_StopShardEvacuation_FullMethodName = "/control.ControlService/StopShardEvacuation"
+ ControlService_FlushCache_FullMethodName = "/control.ControlService/FlushCache"
+ ControlService_Doctor_FullMethodName = "/control.ControlService/Doctor"
)
// ControlServiceClient is the client API for ControlService service.
@@ -52,8 +41,6 @@ type ControlServiceClient interface {
HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
// Sets status of the storage node in FrostFS network map.
SetNetmapStatus(ctx context.Context, in *SetNetmapStatusRequest, opts ...grpc.CallOption) (*SetNetmapStatusResponse, error)
- // Gets status of the storage node in FrostFS network map.
- GetNetmapStatus(ctx context.Context, in *GetNetmapStatusRequest, opts ...grpc.CallOption) (*GetNetmapStatusResponse, error)
// Mark objects to be removed from node's local object storage.
DropObjects(ctx context.Context, in *DropObjectsRequest, opts ...grpc.CallOption) (*DropObjectsResponse, error)
// Returns list that contains information about all shards of a node.
@@ -62,42 +49,19 @@ type ControlServiceClient interface {
SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error)
+ // EvacuateShard moves all data from one shard to the others.
+ // Deprecated: Use StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
+ EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error)
// StartShardEvacuation starts moving all data from one shard to the others.
StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error)
// GetShardEvacuationStatus returns evacuation status.
GetShardEvacuationStatus(ctx context.Context, in *GetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*GetShardEvacuationStatusResponse, error)
- // ResetShardEvacuationStatus resets evacuation status if there is no running
- // evacuation process.
- ResetShardEvacuationStatus(ctx context.Context, in *ResetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*ResetShardEvacuationStatusResponse, error)
// StopShardEvacuation stops moving all data from one shard to the others.
StopShardEvacuation(ctx context.Context, in *StopShardEvacuationRequest, opts ...grpc.CallOption) (*StopShardEvacuationResponse, error)
// FlushCache moves all data from one shard to the others.
FlushCache(ctx context.Context, in *FlushCacheRequest, opts ...grpc.CallOption) (*FlushCacheResponse, error)
// Doctor performs storage restructuring operations on engine.
Doctor(ctx context.Context, in *DoctorRequest, opts ...grpc.CallOption) (*DoctorResponse, error)
- // Add local access policy engine overrides to a node.
- AddChainLocalOverride(ctx context.Context, in *AddChainLocalOverrideRequest, opts ...grpc.CallOption) (*AddChainLocalOverrideResponse, error)
- // Get local access policy engine overrides stored in the node by chain id.
- GetChainLocalOverride(ctx context.Context, in *GetChainLocalOverrideRequest, opts ...grpc.CallOption) (*GetChainLocalOverrideResponse, error)
- // List local access policy engine overrides stored in the node by container
- // id.
- ListChainLocalOverrides(ctx context.Context, in *ListChainLocalOverridesRequest, opts ...grpc.CallOption) (*ListChainLocalOverridesResponse, error)
- // Remove local access policy engine overrides stored in the node by chaind
- // id.
- RemoveChainLocalOverride(ctx context.Context, in *RemoveChainLocalOverrideRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverrideResponse, error)
- // Remove local access policy engine overrides stored in the node by chaind
- // id.
- RemoveChainLocalOverridesByTarget(ctx context.Context, in *RemoveChainLocalOverridesByTargetRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverridesByTargetResponse, error)
- // List targets of the local APE overrides stored in the node.
- ListTargetsLocalOverrides(ctx context.Context, in *ListTargetsLocalOverridesRequest, opts ...grpc.CallOption) (*ListTargetsLocalOverridesResponse, error)
- // Flush objects from write-cache and move it to degraded read only mode.
- SealWriteCache(ctx context.Context, in *SealWriteCacheRequest, opts ...grpc.CallOption) (*SealWriteCacheResponse, error)
- // DetachShards detaches and closes shards.
- DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error)
- // StartShardRebuild starts shard rebuild process.
- StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error)
- // ListShardsForObject returns shard info where object is stored.
- ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error)
}
type controlServiceClient struct {
@@ -126,15 +90,6 @@ func (c *controlServiceClient) SetNetmapStatus(ctx context.Context, in *SetNetma
return out, nil
}
-func (c *controlServiceClient) GetNetmapStatus(ctx context.Context, in *GetNetmapStatusRequest, opts ...grpc.CallOption) (*GetNetmapStatusResponse, error) {
- out := new(GetNetmapStatusResponse)
- err := c.cc.Invoke(ctx, ControlService_GetNetmapStatus_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
func (c *controlServiceClient) DropObjects(ctx context.Context, in *DropObjectsRequest, opts ...grpc.CallOption) (*DropObjectsResponse, error) {
out := new(DropObjectsResponse)
err := c.cc.Invoke(ctx, ControlService_DropObjects_FullMethodName, in, out, opts...)
@@ -171,6 +126,15 @@ func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *Synchron
return out, nil
}
+func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) {
+ out := new(EvacuateShardResponse)
+ err := c.cc.Invoke(ctx, ControlService_EvacuateShard_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *controlServiceClient) StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) {
out := new(StartShardEvacuationResponse)
err := c.cc.Invoke(ctx, ControlService_StartShardEvacuation_FullMethodName, in, out, opts...)
@@ -189,15 +153,6 @@ func (c *controlServiceClient) GetShardEvacuationStatus(ctx context.Context, in
return out, nil
}
-func (c *controlServiceClient) ResetShardEvacuationStatus(ctx context.Context, in *ResetShardEvacuationStatusRequest, opts ...grpc.CallOption) (*ResetShardEvacuationStatusResponse, error) {
- out := new(ResetShardEvacuationStatusResponse)
- err := c.cc.Invoke(ctx, ControlService_ResetShardEvacuationStatus_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
func (c *controlServiceClient) StopShardEvacuation(ctx context.Context, in *StopShardEvacuationRequest, opts ...grpc.CallOption) (*StopShardEvacuationResponse, error) {
out := new(StopShardEvacuationResponse)
err := c.cc.Invoke(ctx, ControlService_StopShardEvacuation_FullMethodName, in, out, opts...)
@@ -225,96 +180,6 @@ func (c *controlServiceClient) Doctor(ctx context.Context, in *DoctorRequest, op
return out, nil
}
-func (c *controlServiceClient) AddChainLocalOverride(ctx context.Context, in *AddChainLocalOverrideRequest, opts ...grpc.CallOption) (*AddChainLocalOverrideResponse, error) {
- out := new(AddChainLocalOverrideResponse)
- err := c.cc.Invoke(ctx, ControlService_AddChainLocalOverride_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) GetChainLocalOverride(ctx context.Context, in *GetChainLocalOverrideRequest, opts ...grpc.CallOption) (*GetChainLocalOverrideResponse, error) {
- out := new(GetChainLocalOverrideResponse)
- err := c.cc.Invoke(ctx, ControlService_GetChainLocalOverride_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) ListChainLocalOverrides(ctx context.Context, in *ListChainLocalOverridesRequest, opts ...grpc.CallOption) (*ListChainLocalOverridesResponse, error) {
- out := new(ListChainLocalOverridesResponse)
- err := c.cc.Invoke(ctx, ControlService_ListChainLocalOverrides_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) RemoveChainLocalOverride(ctx context.Context, in *RemoveChainLocalOverrideRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverrideResponse, error) {
- out := new(RemoveChainLocalOverrideResponse)
- err := c.cc.Invoke(ctx, ControlService_RemoveChainLocalOverride_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) RemoveChainLocalOverridesByTarget(ctx context.Context, in *RemoveChainLocalOverridesByTargetRequest, opts ...grpc.CallOption) (*RemoveChainLocalOverridesByTargetResponse, error) {
- out := new(RemoveChainLocalOverridesByTargetResponse)
- err := c.cc.Invoke(ctx, ControlService_RemoveChainLocalOverridesByTarget_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) ListTargetsLocalOverrides(ctx context.Context, in *ListTargetsLocalOverridesRequest, opts ...grpc.CallOption) (*ListTargetsLocalOverridesResponse, error) {
- out := new(ListTargetsLocalOverridesResponse)
- err := c.cc.Invoke(ctx, ControlService_ListTargetsLocalOverrides_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) SealWriteCache(ctx context.Context, in *SealWriteCacheRequest, opts ...grpc.CallOption) (*SealWriteCacheResponse, error) {
- out := new(SealWriteCacheResponse)
- err := c.cc.Invoke(ctx, ControlService_SealWriteCache_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error) {
- out := new(DetachShardsResponse)
- err := c.cc.Invoke(ctx, ControlService_DetachShards_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) {
- out := new(StartShardRebuildResponse)
- err := c.cc.Invoke(ctx, ControlService_StartShardRebuild_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) {
- out := new(ListShardsForObjectResponse)
- err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
// ControlServiceServer is the server API for ControlService service.
// All implementations should embed UnimplementedControlServiceServer
// for forward compatibility
@@ -323,8 +188,6 @@ type ControlServiceServer interface {
HealthCheck(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
// Sets status of the storage node in FrostFS network map.
SetNetmapStatus(context.Context, *SetNetmapStatusRequest) (*SetNetmapStatusResponse, error)
- // Gets status of the storage node in FrostFS network map.
- GetNetmapStatus(context.Context, *GetNetmapStatusRequest) (*GetNetmapStatusResponse, error)
// Mark objects to be removed from node's local object storage.
DropObjects(context.Context, *DropObjectsRequest) (*DropObjectsResponse, error)
// Returns list that contains information about all shards of a node.
@@ -333,42 +196,19 @@ type ControlServiceServer interface {
SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error)
+ // EvacuateShard moves all data from one shard to the others.
+ // Deprecated: Use StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
+ EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error)
// StartShardEvacuation starts moving all data from one shard to the others.
StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error)
// GetShardEvacuationStatus returns evacuation status.
GetShardEvacuationStatus(context.Context, *GetShardEvacuationStatusRequest) (*GetShardEvacuationStatusResponse, error)
- // ResetShardEvacuationStatus resets evacuation status if there is no running
- // evacuation process.
- ResetShardEvacuationStatus(context.Context, *ResetShardEvacuationStatusRequest) (*ResetShardEvacuationStatusResponse, error)
// StopShardEvacuation stops moving all data from one shard to the others.
StopShardEvacuation(context.Context, *StopShardEvacuationRequest) (*StopShardEvacuationResponse, error)
// FlushCache moves all data from one shard to the others.
FlushCache(context.Context, *FlushCacheRequest) (*FlushCacheResponse, error)
// Doctor performs storage restructuring operations on engine.
Doctor(context.Context, *DoctorRequest) (*DoctorResponse, error)
- // Add local access policy engine overrides to a node.
- AddChainLocalOverride(context.Context, *AddChainLocalOverrideRequest) (*AddChainLocalOverrideResponse, error)
- // Get local access policy engine overrides stored in the node by chain id.
- GetChainLocalOverride(context.Context, *GetChainLocalOverrideRequest) (*GetChainLocalOverrideResponse, error)
- // List local access policy engine overrides stored in the node by container
- // id.
- ListChainLocalOverrides(context.Context, *ListChainLocalOverridesRequest) (*ListChainLocalOverridesResponse, error)
- // Remove local access policy engine overrides stored in the node by chaind
- // id.
- RemoveChainLocalOverride(context.Context, *RemoveChainLocalOverrideRequest) (*RemoveChainLocalOverrideResponse, error)
- // Remove local access policy engine overrides stored in the node by chaind
- // id.
- RemoveChainLocalOverridesByTarget(context.Context, *RemoveChainLocalOverridesByTargetRequest) (*RemoveChainLocalOverridesByTargetResponse, error)
- // List targets of the local APE overrides stored in the node.
- ListTargetsLocalOverrides(context.Context, *ListTargetsLocalOverridesRequest) (*ListTargetsLocalOverridesResponse, error)
- // Flush objects from write-cache and move it to degraded read only mode.
- SealWriteCache(context.Context, *SealWriteCacheRequest) (*SealWriteCacheResponse, error)
- // DetachShards detaches and closes shards.
- DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error)
- // StartShardRebuild starts shard rebuild process.
- StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error)
- // ListShardsForObject returns shard info where object is stored.
- ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -381,9 +221,6 @@ func (UnimplementedControlServiceServer) HealthCheck(context.Context, *HealthChe
func (UnimplementedControlServiceServer) SetNetmapStatus(context.Context, *SetNetmapStatusRequest) (*SetNetmapStatusResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetNetmapStatus not implemented")
}
-func (UnimplementedControlServiceServer) GetNetmapStatus(context.Context, *GetNetmapStatusRequest) (*GetNetmapStatusResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetNetmapStatus not implemented")
-}
func (UnimplementedControlServiceServer) DropObjects(context.Context, *DropObjectsRequest) (*DropObjectsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DropObjects not implemented")
}
@@ -396,15 +233,15 @@ func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShard
func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented")
}
+func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method EvacuateShard not implemented")
+}
func (UnimplementedControlServiceServer) StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method StartShardEvacuation not implemented")
}
func (UnimplementedControlServiceServer) GetShardEvacuationStatus(context.Context, *GetShardEvacuationStatusRequest) (*GetShardEvacuationStatusResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetShardEvacuationStatus not implemented")
}
-func (UnimplementedControlServiceServer) ResetShardEvacuationStatus(context.Context, *ResetShardEvacuationStatusRequest) (*ResetShardEvacuationStatusResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ResetShardEvacuationStatus not implemented")
-}
func (UnimplementedControlServiceServer) StopShardEvacuation(context.Context, *StopShardEvacuationRequest) (*StopShardEvacuationResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method StopShardEvacuation not implemented")
}
@@ -414,36 +251,6 @@ func (UnimplementedControlServiceServer) FlushCache(context.Context, *FlushCache
func (UnimplementedControlServiceServer) Doctor(context.Context, *DoctorRequest) (*DoctorResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Doctor not implemented")
}
-func (UnimplementedControlServiceServer) AddChainLocalOverride(context.Context, *AddChainLocalOverrideRequest) (*AddChainLocalOverrideResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AddChainLocalOverride not implemented")
-}
-func (UnimplementedControlServiceServer) GetChainLocalOverride(context.Context, *GetChainLocalOverrideRequest) (*GetChainLocalOverrideResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetChainLocalOverride not implemented")
-}
-func (UnimplementedControlServiceServer) ListChainLocalOverrides(context.Context, *ListChainLocalOverridesRequest) (*ListChainLocalOverridesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListChainLocalOverrides not implemented")
-}
-func (UnimplementedControlServiceServer) RemoveChainLocalOverride(context.Context, *RemoveChainLocalOverrideRequest) (*RemoveChainLocalOverrideResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RemoveChainLocalOverride not implemented")
-}
-func (UnimplementedControlServiceServer) RemoveChainLocalOverridesByTarget(context.Context, *RemoveChainLocalOverridesByTargetRequest) (*RemoveChainLocalOverridesByTargetResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RemoveChainLocalOverridesByTarget not implemented")
-}
-func (UnimplementedControlServiceServer) ListTargetsLocalOverrides(context.Context, *ListTargetsLocalOverridesRequest) (*ListTargetsLocalOverridesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListTargetsLocalOverrides not implemented")
-}
-func (UnimplementedControlServiceServer) SealWriteCache(context.Context, *SealWriteCacheRequest) (*SealWriteCacheResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SealWriteCache not implemented")
-}
-func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DetachShards not implemented")
-}
-func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented")
-}
-func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented")
-}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -492,24 +299,6 @@ func _ControlService_SetNetmapStatus_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
-func _ControlService_GetNetmapStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetNetmapStatusRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).GetNetmapStatus(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_GetNetmapStatus_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).GetNetmapStatus(ctx, req.(*GetNetmapStatusRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _ControlService_DropObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DropObjectsRequest)
if err := dec(in); err != nil {
@@ -582,6 +371,24 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
+func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(EvacuateShardRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).EvacuateShard(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_EvacuateShard_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _ControlService_StartShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StartShardEvacuationRequest)
if err := dec(in); err != nil {
@@ -618,24 +425,6 @@ func _ControlService_GetShardEvacuationStatus_Handler(srv interface{}, ctx conte
return interceptor(ctx, in, info, handler)
}
-func _ControlService_ResetShardEvacuationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ResetShardEvacuationStatusRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).ResetShardEvacuationStatus(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_ResetShardEvacuationStatus_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).ResetShardEvacuationStatus(ctx, req.(*ResetShardEvacuationStatusRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _ControlService_StopShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StopShardEvacuationRequest)
if err := dec(in); err != nil {
@@ -690,186 +479,6 @@ func _ControlService_Doctor_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler)
}
-func _ControlService_AddChainLocalOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AddChainLocalOverrideRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).AddChainLocalOverride(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_AddChainLocalOverride_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).AddChainLocalOverride(ctx, req.(*AddChainLocalOverrideRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_GetChainLocalOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetChainLocalOverrideRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).GetChainLocalOverride(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_GetChainLocalOverride_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).GetChainLocalOverride(ctx, req.(*GetChainLocalOverrideRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_ListChainLocalOverrides_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListChainLocalOverridesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).ListChainLocalOverrides(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_ListChainLocalOverrides_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).ListChainLocalOverrides(ctx, req.(*ListChainLocalOverridesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_RemoveChainLocalOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RemoveChainLocalOverrideRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).RemoveChainLocalOverride(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_RemoveChainLocalOverride_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).RemoveChainLocalOverride(ctx, req.(*RemoveChainLocalOverrideRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_RemoveChainLocalOverridesByTarget_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RemoveChainLocalOverridesByTargetRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).RemoveChainLocalOverridesByTarget(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_RemoveChainLocalOverridesByTarget_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).RemoveChainLocalOverridesByTarget(ctx, req.(*RemoveChainLocalOverridesByTargetRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_ListTargetsLocalOverrides_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListTargetsLocalOverridesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).ListTargetsLocalOverrides(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_ListTargetsLocalOverrides_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).ListTargetsLocalOverrides(ctx, req.(*ListTargetsLocalOverridesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_SealWriteCache_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SealWriteCacheRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).SealWriteCache(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_SealWriteCache_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).SealWriteCache(ctx, req.(*SealWriteCacheRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_DetachShards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DetachShardsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).DetachShards(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_DetachShards_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).DetachShards(ctx, req.(*DetachShardsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(StartShardRebuildRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).StartShardRebuild(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_StartShardRebuild_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).StartShardRebuild(ctx, req.(*StartShardRebuildRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListShardsForObjectRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).ListShardsForObject(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_ListShardsForObject_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -885,10 +494,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SetNetmapStatus",
Handler: _ControlService_SetNetmapStatus_Handler,
},
- {
- MethodName: "GetNetmapStatus",
- Handler: _ControlService_GetNetmapStatus_Handler,
- },
{
MethodName: "DropObjects",
Handler: _ControlService_DropObjects_Handler,
@@ -905,6 +510,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SynchronizeTree",
Handler: _ControlService_SynchronizeTree_Handler,
},
+ {
+ MethodName: "EvacuateShard",
+ Handler: _ControlService_EvacuateShard_Handler,
+ },
{
MethodName: "StartShardEvacuation",
Handler: _ControlService_StartShardEvacuation_Handler,
@@ -913,10 +522,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetShardEvacuationStatus",
Handler: _ControlService_GetShardEvacuationStatus_Handler,
},
- {
- MethodName: "ResetShardEvacuationStatus",
- Handler: _ControlService_ResetShardEvacuationStatus_Handler,
- },
{
MethodName: "StopShardEvacuation",
Handler: _ControlService_StopShardEvacuation_Handler,
@@ -929,46 +534,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "Doctor",
Handler: _ControlService_Doctor_Handler,
},
- {
- MethodName: "AddChainLocalOverride",
- Handler: _ControlService_AddChainLocalOverride_Handler,
- },
- {
- MethodName: "GetChainLocalOverride",
- Handler: _ControlService_GetChainLocalOverride_Handler,
- },
- {
- MethodName: "ListChainLocalOverrides",
- Handler: _ControlService_ListChainLocalOverrides_Handler,
- },
- {
- MethodName: "RemoveChainLocalOverride",
- Handler: _ControlService_RemoveChainLocalOverride_Handler,
- },
- {
- MethodName: "RemoveChainLocalOverridesByTarget",
- Handler: _ControlService_RemoveChainLocalOverridesByTarget_Handler,
- },
- {
- MethodName: "ListTargetsLocalOverrides",
- Handler: _ControlService_ListTargetsLocalOverrides_Handler,
- },
- {
- MethodName: "SealWriteCache",
- Handler: _ControlService_SealWriteCache_Handler,
- },
- {
- MethodName: "DetachShards",
- Handler: _ControlService_DetachShards_Handler,
- },
- {
- MethodName: "StartShardRebuild",
- Handler: _ControlService_StartShardRebuild_Handler,
- },
- {
- MethodName: "ListShardsForObject",
- Handler: _ControlService_ListShardsForObject_Handler,
- },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/service.proto",
diff --git a/pkg/services/control/service_test.go b/pkg/services/control/service_test.go
new file mode 100644
index 000000000..8c96e2b73
--- /dev/null
+++ b/pkg/services/control/service_test.go
@@ -0,0 +1,180 @@
+package control_test
+
+import (
+ "bytes"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+)
+
+func TestHealthCheckResponse_Body_StableMarshal(t *testing.T) {
+ testStableMarshal(t,
+ generateHealthCheckResponseBody(),
+ new(control.HealthCheckResponse_Body),
+ func(m1, m2 protoMessage) bool {
+ return equalHealthCheckResponseBodies(
+ m1.(*control.HealthCheckResponse_Body),
+ m2.(*control.HealthCheckResponse_Body),
+ )
+ },
+ )
+}
+
+func generateHealthCheckResponseBody() *control.HealthCheckResponse_Body {
+ body := new(control.HealthCheckResponse_Body)
+ body.SetNetmapStatus(control.NetmapStatus_ONLINE)
+ body.SetHealthStatus(control.HealthStatus_SHUTTING_DOWN)
+
+ return body
+}
+
+func equalHealthCheckResponseBodies(b1, b2 *control.HealthCheckResponse_Body) bool {
+ return b1.GetNetmapStatus() == b2.GetNetmapStatus() &&
+ b1.GetHealthStatus() == b2.GetHealthStatus()
+}
+
+func TestSetNetmapStatusRequest_Body_StableMarshal(t *testing.T) {
+ testStableMarshal(t,
+ generateSetNetmapStatusRequestBody(),
+ new(control.SetNetmapStatusRequest_Body),
+ func(m1, m2 protoMessage) bool {
+ return equalSetnetmapStatusRequestBodies(
+ m1.(*control.SetNetmapStatusRequest_Body),
+ m2.(*control.SetNetmapStatusRequest_Body),
+ )
+ },
+ )
+}
+
+func generateSetNetmapStatusRequestBody() *control.SetNetmapStatusRequest_Body {
+ body := new(control.SetNetmapStatusRequest_Body)
+ body.SetStatus(control.NetmapStatus_ONLINE)
+
+ return body
+}
+
+func equalSetnetmapStatusRequestBodies(b1, b2 *control.SetNetmapStatusRequest_Body) bool {
+ return b1.GetStatus() == b2.GetStatus()
+}
+
+func TestListShardsResponse_Body_StableMarshal(t *testing.T) {
+ testStableMarshal(t,
+ generateListShardsResponseBody(),
+ new(control.ListShardsResponse_Body),
+ func(m1, m2 protoMessage) bool {
+ return equalListShardResponseBodies(
+ m1.(*control.ListShardsResponse_Body),
+ m2.(*control.ListShardsResponse_Body),
+ )
+ },
+ )
+}
+
+func equalListShardResponseBodies(b1, b2 *control.ListShardsResponse_Body) bool {
+ if len(b1.Shards) != len(b2.Shards) {
+ return false
+ }
+
+ for i := range b1.Shards {
+ if b1.Shards[i].GetMetabasePath() != b2.Shards[i].GetMetabasePath() ||
+ b1.Shards[i].GetWritecachePath() != b2.Shards[i].GetWritecachePath() ||
+ b1.Shards[i].GetPiloramaPath() != b2.Shards[i].GetPiloramaPath() ||
+ !bytes.Equal(b1.Shards[i].GetShard_ID(), b2.Shards[i].GetShard_ID()) {
+ return false
+ }
+
+ info1 := b1.Shards[i].GetBlobstor()
+ info2 := b2.Shards[i].GetBlobstor()
+ if !compareBlobstorInfo(info1, info2) {
+ return false
+ }
+ }
+
+ for i := range b1.Shards {
+ for j := i + 1; j < len(b1.Shards); j++ {
+ if b1.Shards[i].GetMetabasePath() == b2.Shards[j].GetMetabasePath() ||
+ !compareBlobstorInfo(b1.Shards[i].Blobstor, b2.Shards[i].Blobstor) ||
+ b1.Shards[i].GetWritecachePath() == b2.Shards[j].GetWritecachePath() ||
+ bytes.Equal(b1.Shards[i].GetShard_ID(), b2.Shards[j].GetShard_ID()) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+func compareBlobstorInfo(a, b []*control.BlobstorInfo) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i].Type != b[i].Type ||
+ a[i].Path != b[i].Path {
+ return false
+ }
+ }
+ return true
+}
+
+func generateListShardsResponseBody() *control.ListShardsResponse_Body {
+ body := new(control.ListShardsResponse_Body)
+ body.SetShards([]*control.ShardInfo{
+ generateShardInfo(0),
+ generateShardInfo(1),
+ })
+
+ return body
+}
+
+func TestSetShardModeRequest_Body_StableMarshal(t *testing.T) {
+ testStableMarshal(t,
+ generateSetShardModeRequestBody(),
+ new(control.SetShardModeRequest_Body),
+ func(m1, m2 protoMessage) bool {
+ return equalSetShardModeRequestBodies(
+ m1.(*control.SetShardModeRequest_Body),
+ m2.(*control.SetShardModeRequest_Body),
+ )
+ },
+ )
+}
+
+func generateSetShardModeRequestBody() *control.SetShardModeRequest_Body {
+ body := new(control.SetShardModeRequest_Body)
+ body.SetShardIDList([][]byte{{0, 1, 2, 3, 4}})
+ body.SetMode(control.ShardMode_READ_WRITE)
+
+ return body
+}
+
+func equalSetShardModeRequestBodies(b1, b2 *control.SetShardModeRequest_Body) bool {
+ if b1.GetMode() != b2.GetMode() || len(b1.Shard_ID) != len(b2.Shard_ID) {
+ return false
+ }
+
+ for i := range b1.Shard_ID {
+ if !bytes.Equal(b1.Shard_ID[i], b2.Shard_ID[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func TestSynchronizeTreeRequest_Body_StableMarshal(t *testing.T) {
+ testStableMarshal(t,
+ &control.SynchronizeTreeRequest_Body{
+ ContainerId: []byte{1, 2, 3, 4, 5, 6, 7},
+ TreeId: "someID",
+ Height: 42,
+ },
+ new(control.SynchronizeTreeRequest_Body),
+ func(m1, m2 protoMessage) bool {
+ b1 := m1.(*control.SynchronizeTreeRequest_Body)
+ b2 := m2.(*control.SynchronizeTreeRequest_Body)
+ return bytes.Equal(b1.GetContainerId(), b2.GetContainerId()) &&
+ b1.GetTreeId() == b2.GetTreeId() &&
+ b1.GetHeight() == b2.GetHeight()
+ },
+ )
+}
diff --git a/pkg/services/control/types.go b/pkg/services/control/types.go
new file mode 100644
index 000000000..94f681c55
--- /dev/null
+++ b/pkg/services/control/types.go
@@ -0,0 +1,118 @@
+package control
+
+import (
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+// SetKey sets public key used for signing.
+func (x *Signature) SetKey(v []byte) {
+ if x != nil {
+ x.Key = v
+ }
+}
+
+// SetSign sets binary signature.
+func (x *Signature) SetSign(v []byte) {
+ if x != nil {
+ x.Sign = v
+ }
+}
+
+// SetKey sets key of the node attribute.
+func (x *NodeInfo_Attribute) SetKey(v string) {
+ if x != nil {
+ x.Key = v
+ }
+}
+
+// SetValue sets value of the node attribute.
+func (x *NodeInfo_Attribute) SetValue(v string) {
+ if x != nil {
+ x.Value = v
+ }
+}
+
+// SetParents sets parent keys.
+func (x *NodeInfo_Attribute) SetParents(v []string) {
+ if x != nil {
+ x.Parents = v
+ }
+}
+
+// SetPublicKey sets public key of the FrostFS node in a binary format.
+func (x *NodeInfo) SetPublicKey(v []byte) {
+ if x != nil {
+ x.PublicKey = v
+ }
+}
+
+// SetAddresses sets ways to connect to a node.
+func (x *NodeInfo) SetAddresses(v []string) {
+ if x != nil {
+ x.Addresses = v
+ }
+}
+
+// SetAttributes sets attributes of the FrostFS Storage Node.
+func (x *NodeInfo) SetAttributes(v []*NodeInfo_Attribute) {
+ if x != nil {
+ x.Attributes = v
+ }
+}
+
+// SetState sets state of the FrostFS node.
+func (x *NodeInfo) SetState(v NetmapStatus) {
+ if x != nil {
+ x.State = v
+ }
+}
+
+// SetEpoch sets revision number of the network map.
+func (x *Netmap) SetEpoch(v uint64) {
+ if x != nil {
+ x.Epoch = v
+ }
+}
+
+// SetNodes sets nodes presented in network.
+func (x *Netmap) SetNodes(v []*NodeInfo) {
+ if x != nil {
+ x.Nodes = v
+ }
+}
+
+func (x *Netmap) MarshalJSON() ([]byte, error) {
+ return protojson.MarshalOptions{
+ EmitUnpopulated: true,
+ }.Marshal(x)
+}
+
+// SetID sets identificator of the shard.
+func (x *ShardInfo) SetID(v []byte) {
+ x.Shard_ID = v
+}
+
+// SetMetabasePath sets path to shard's metabase.
+func (x *ShardInfo) SetMetabasePath(v string) {
+ x.MetabasePath = v
+}
+
+// SetWriteCachePath sets path to shard's write-cache.
+func (x *ShardInfo) SetWriteCachePath(v string) {
+ x.WritecachePath = v
+}
+
+// SetPiloramaPath sets path to shard's pilorama.
+func (x *ShardInfo) SetPiloramaPath(v string) {
+ x.PiloramaPath = v
+}
+
+// SetMode sets path to shard's work mode.
+func (x *ShardInfo) SetMode(v ShardMode) {
+ x.Mode = v
+}
+
+// SetErrorCount sets shard's error counter.
+func (x *ShardInfo) SetErrorCount(count uint32) {
+ x.ErrorCount = count
+}
diff --git a/pkg/services/control/types.pb.go b/pkg/services/control/types.pb.go
new file mode 100644
index 000000000..d2ee50770
--- /dev/null
+++ b/pkg/services/control/types.pb.go
@@ -0,0 +1,868 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.9
+// source: pkg/services/control/types.proto
+
+package control
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Status of the storage node in the FrostFS network map.
+type NetmapStatus int32
+
+const (
+ // Undefined status, default value.
+ NetmapStatus_STATUS_UNDEFINED NetmapStatus = 0
+ // Node is online.
+ NetmapStatus_ONLINE NetmapStatus = 1
+ // Node is offline.
+ NetmapStatus_OFFLINE NetmapStatus = 2
+ // Node is maintained by the owner.
+ NetmapStatus_MAINTENANCE NetmapStatus = 3
+)
+
+// Enum value maps for NetmapStatus.
+var (
+ NetmapStatus_name = map[int32]string{
+ 0: "STATUS_UNDEFINED",
+ 1: "ONLINE",
+ 2: "OFFLINE",
+ 3: "MAINTENANCE",
+ }
+ NetmapStatus_value = map[string]int32{
+ "STATUS_UNDEFINED": 0,
+ "ONLINE": 1,
+ "OFFLINE": 2,
+ "MAINTENANCE": 3,
+ }
+)
+
+func (x NetmapStatus) Enum() *NetmapStatus {
+ p := new(NetmapStatus)
+ *p = x
+ return p
+}
+
+func (x NetmapStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (NetmapStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_pkg_services_control_types_proto_enumTypes[0].Descriptor()
+}
+
+func (NetmapStatus) Type() protoreflect.EnumType {
+ return &file_pkg_services_control_types_proto_enumTypes[0]
+}
+
+func (x NetmapStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use NetmapStatus.Descriptor instead.
+func (NetmapStatus) EnumDescriptor() ([]byte, []int) {
+ return file_pkg_services_control_types_proto_rawDescGZIP(), []int{0}
+}
+
+// Health status of the storage node application.
+type HealthStatus int32
+
+const (
+ // Undefined status, default value.
+ HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
+ // Storage node application is starting.
+ HealthStatus_STARTING HealthStatus = 1
+ // Storage node application is started and serves all services.
+ HealthStatus_READY HealthStatus = 2
+ // Storage node application is shutting down.
+ HealthStatus_SHUTTING_DOWN HealthStatus = 3
+)
+
+// Enum value maps for HealthStatus.
+var (
+ HealthStatus_name = map[int32]string{
+ 0: "HEALTH_STATUS_UNDEFINED",
+ 1: "STARTING",
+ 2: "READY",
+ 3: "SHUTTING_DOWN",
+ }
+ HealthStatus_value = map[string]int32{
+ "HEALTH_STATUS_UNDEFINED": 0,
+ "STARTING": 1,
+ "READY": 2,
+ "SHUTTING_DOWN": 3,
+ }
+)
+
+func (x HealthStatus) Enum() *HealthStatus {
+ p := new(HealthStatus)
+ *p = x
+ return p
+}
+
+func (x HealthStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HealthStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_pkg_services_control_types_proto_enumTypes[1].Descriptor()
+}
+
+func (HealthStatus) Type() protoreflect.EnumType {
+ return &file_pkg_services_control_types_proto_enumTypes[1]
+}
+
+func (x HealthStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HealthStatus.Descriptor instead.
+func (HealthStatus) EnumDescriptor() ([]byte, []int) {
+ return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1}
+}
+
+// Work mode of the shard.
+type ShardMode int32
+
+const (
+ // Undefined mode, default value.
+ ShardMode_SHARD_MODE_UNDEFINED ShardMode = 0
+ // Read-write.
+ ShardMode_READ_WRITE ShardMode = 1
+ // Read-only.
+ ShardMode_READ_ONLY ShardMode = 2
+ // Degraded.
+ ShardMode_DEGRADED ShardMode = 3
+ // DegradedReadOnly.
+ ShardMode_DEGRADED_READ_ONLY ShardMode = 4
+)
+
+// Enum value maps for ShardMode.
+var (
+ ShardMode_name = map[int32]string{
+ 0: "SHARD_MODE_UNDEFINED",
+ 1: "READ_WRITE",
+ 2: "READ_ONLY",
+ 3: "DEGRADED",
+ 4: "DEGRADED_READ_ONLY",
+ }
+ ShardMode_value = map[string]int32{
+ "SHARD_MODE_UNDEFINED": 0,
+ "READ_WRITE": 1,
+ "READ_ONLY": 2,
+ "DEGRADED": 3,
+ "DEGRADED_READ_ONLY": 4,
+ }
+)
+
+func (x ShardMode) Enum() *ShardMode {
+ p := new(ShardMode)
+ *p = x
+ return p
+}
+
+func (x ShardMode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ShardMode) Descriptor() protoreflect.EnumDescriptor {
+ return file_pkg_services_control_types_proto_enumTypes[2].Descriptor()
+}
+
+func (ShardMode) Type() protoreflect.EnumType {
+ return &file_pkg_services_control_types_proto_enumTypes[2]
+}
+
+func (x ShardMode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ShardMode.Descriptor instead.
+func (ShardMode) EnumDescriptor() ([]byte, []int) {
+ return file_pkg_services_control_types_proto_rawDescGZIP(), []int{2}
+}
+
+// Signature of some message.
+type Signature struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Public key used for signing.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // Binary signature.
+ Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"`
+}
+
+func (x *Signature) Reset() {
+ *x = Signature{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_types_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Signature) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Signature) ProtoMessage() {}
+
+func (x *Signature) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_types_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
+func (*Signature) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_types_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Signature) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *Signature) GetSign() []byte {
+ if x != nil {
+ return x.Sign
+ }
+ return nil
+}
+
+// FrostFS node description.
+type NodeInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Public key of the FrostFS node in a binary format.
+ PublicKey []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
+ // Ways to connect to a node.
+ Addresses []string `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"`
+ // Carries list of the FrostFS node attributes in a key-value form. Key name
+ // must be a node-unique valid UTF-8 string. Value can't be empty. NodeInfo
+ // structures with duplicated attribute names or attributes with empty values
+ // will be considered invalid.
+ Attributes []*NodeInfo_Attribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // Carries state of the FrostFS node.
+ State NetmapStatus `protobuf:"varint,4,opt,name=state,proto3,enum=control.NetmapStatus" json:"state,omitempty"`
+}
+
+func (x *NodeInfo) Reset() {
+ *x = NodeInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_types_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NodeInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NodeInfo) ProtoMessage() {}
+
+func (x *NodeInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_types_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead.
+func (*NodeInfo) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *NodeInfo) GetPublicKey() []byte {
+ if x != nil {
+ return x.PublicKey
+ }
+ return nil
+}
+
+func (x *NodeInfo) GetAddresses() []string {
+ if x != nil {
+ return x.Addresses
+ }
+ return nil
+}
+
+func (x *NodeInfo) GetAttributes() []*NodeInfo_Attribute {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *NodeInfo) GetState() NetmapStatus {
+ if x != nil {
+ return x.State
+ }
+ return NetmapStatus_STATUS_UNDEFINED
+}
+
+// Network map structure.
+type Netmap struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Network map revision number.
+ Epoch uint64 `protobuf:"varint,1,opt,name=epoch,proto3" json:"epoch,omitempty"`
+ // Nodes presented in network.
+ Nodes []*NodeInfo `protobuf:"bytes,2,rep,name=nodes,proto3" json:"nodes,omitempty"`
+}
+
+func (x *Netmap) Reset() {
+ *x = Netmap{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_types_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Netmap) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Netmap) ProtoMessage() {}
+
+func (x *Netmap) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_types_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Netmap.ProtoReflect.Descriptor instead.
+func (*Netmap) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_types_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Netmap) GetEpoch() uint64 {
+ if x != nil {
+ return x.Epoch
+ }
+ return 0
+}
+
+func (x *Netmap) GetNodes() []*NodeInfo {
+ if x != nil {
+ return x.Nodes
+ }
+ return nil
+}
+
+// Shard description.
+type ShardInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // ID of the shard.
+ Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
+ // Path to shard's metabase.
+ MetabasePath string `protobuf:"bytes,2,opt,name=metabase_path,json=metabasePath,proto3" json:"metabase_path,omitempty"`
+ // Shard's blobstor info.
+ Blobstor []*BlobstorInfo `protobuf:"bytes,3,rep,name=blobstor,proto3" json:"blobstor,omitempty"`
+ // Path to shard's write-cache, empty if disabled.
+ WritecachePath string `protobuf:"bytes,4,opt,name=writecache_path,json=writecachePath,proto3" json:"writecache_path,omitempty"`
+ // Work mode of the shard.
+ Mode ShardMode `protobuf:"varint,5,opt,name=mode,proto3,enum=control.ShardMode" json:"mode,omitempty"`
+ // Amount of errors occured.
+ ErrorCount uint32 `protobuf:"varint,6,opt,name=errorCount,proto3" json:"errorCount,omitempty"`
+ // Path to shard's pilorama storage.
+ PiloramaPath string `protobuf:"bytes,7,opt,name=pilorama_path,json=piloramaPath,proto3" json:"pilorama_path,omitempty"`
+}
+
+func (x *ShardInfo) Reset() {
+ *x = ShardInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_types_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ShardInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ShardInfo) ProtoMessage() {}
+
+func (x *ShardInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_types_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ShardInfo.ProtoReflect.Descriptor instead.
+func (*ShardInfo) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_types_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ShardInfo) GetShard_ID() []byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+
+func (x *ShardInfo) GetMetabasePath() string {
+ if x != nil {
+ return x.MetabasePath
+ }
+ return ""
+}
+
+func (x *ShardInfo) GetBlobstor() []*BlobstorInfo {
+ if x != nil {
+ return x.Blobstor
+ }
+ return nil
+}
+
+func (x *ShardInfo) GetWritecachePath() string {
+ if x != nil {
+ return x.WritecachePath
+ }
+ return ""
+}
+
+func (x *ShardInfo) GetMode() ShardMode {
+ if x != nil {
+ return x.Mode
+ }
+ return ShardMode_SHARD_MODE_UNDEFINED
+}
+
+func (x *ShardInfo) GetErrorCount() uint32 {
+ if x != nil {
+ return x.ErrorCount
+ }
+ return 0
+}
+
+func (x *ShardInfo) GetPiloramaPath() string {
+ if x != nil {
+ return x.PiloramaPath
+ }
+ return ""
+}
+
+// Blobstor component description.
+type BlobstorInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Path to the root.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ // Component type.
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+}
+
+func (x *BlobstorInfo) Reset() {
+ *x = BlobstorInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_types_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BlobstorInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BlobstorInfo) ProtoMessage() {}
+
+func (x *BlobstorInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_types_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BlobstorInfo.ProtoReflect.Descriptor instead.
+func (*BlobstorInfo) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_types_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *BlobstorInfo) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *BlobstorInfo) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+// Administrator-defined Attributes of the FrostFS Storage Node.
+//
+// `Attribute` is a Key-Value metadata pair. Key name must be a valid UTF-8
+// string. Value can't be empty.
+//
+// Node's attributes are mostly used during Storage Policy evaluation to
+// calculate object's placement and find a set of nodes satisfying policy
+// requirements. There are some "well-known" node attributes common to all the
+// Storage Nodes in the network and used implicitly with default values if not
+// explicitly set:
+//
+// - Capacity \
+// Total available disk space in Gigabytes.
+// - Price \
+// Price in GAS tokens for storing one GB of data during one Epoch. In node
+// attributes it's a string presenting floating point number with comma or
+// point delimiter for decimal part. In the Network Map it will be saved as
+// 64-bit unsigned integer representing number of minimal token fractions.
+// - Locode \
+// Node's geographic location in
+// [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html)
+// format approximated to the nearest point defined in standard.
+// - Country \
+// Country code in
+// [ISO 3166-1_alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
+// format. Calculated automatically from `Locode` attribute
+// - Region \
+// Country's administative subdivision where node is located. Calculated
+// automatically from `Locode` attribute based on `SubDiv` field. Presented
+// in [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2) format.
+// - City \
+// City, town, village or rural area name where node is located written
+// without diacritics . Calculated automatically from `Locode` attribute.
+//
+// For detailed description of each well-known attribute please see the
+// corresponding section in FrostFS Technical specification.
+type NodeInfo_Attribute struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Key of the node attribute.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // Value of the node attribute.
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ // Parent keys, if any. For example for `City` it could be `Region` and
+ // `Country`.
+ Parents []string `protobuf:"bytes,3,rep,name=parents,proto3" json:"parents,omitempty"`
+}
+
+func (x *NodeInfo_Attribute) Reset() {
+ *x = NodeInfo_Attribute{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_types_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NodeInfo_Attribute) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NodeInfo_Attribute) ProtoMessage() {}
+
+func (x *NodeInfo_Attribute) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_types_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NodeInfo_Attribute.ProtoReflect.Descriptor instead.
+func (*NodeInfo_Attribute) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *NodeInfo_Attribute) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *NodeInfo_Attribute) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+
+func (x *NodeInfo_Attribute) GetParents() []string {
+ if x != nil {
+ return x.Parents
+ }
+ return nil
+}
+
+var File_pkg_services_control_types_proto protoreflect.FileDescriptor
+
+var file_pkg_services_control_types_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x22, 0x36, 0x0a, 0x09, 0x53,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69,
+ 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12,
+ 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x3b, 0x0a,
+ 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4e, 0x6f, 0x64, 0x65,
+ 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a,
+ 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x4d, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69,
+ 0x62, 0x75, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x47, 0x0a, 0x06, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
+ 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x27, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22,
+ 0x94, 0x02, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a,
+ 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x31, 0x0a,
+ 0x08, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x74,
+ 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72,
+ 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x70,
+ 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65,
+ 0x63, 0x61, 0x63, 0x68, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x6f, 0x64,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64,
+ 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x69, 0x6c, 0x6f, 0x72, 0x61, 0x6d, 0x61, 0x5f, 0x70, 0x61,
+ 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x69, 0x6c, 0x6f, 0x72, 0x61,
+ 0x6d, 0x61, 0x50, 0x61, 0x74, 0x68, 0x22, 0x36, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x74,
+ 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x2a, 0x4e,
+ 0x0a, 0x0c, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14,
+ 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e,
+ 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x4e, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x01,
+ 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x46, 0x46, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a,
+ 0x0b, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x2a, 0x57,
+ 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b,
+ 0x0a, 0x17, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f,
+ 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53,
+ 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41,
+ 0x44, 0x59, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x48, 0x55, 0x54, 0x54, 0x49, 0x4e, 0x47,
+ 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x03, 0x2a, 0x6a, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x48, 0x41, 0x52, 0x44, 0x5f, 0x4d, 0x4f,
+ 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0d,
+ 0x0a, 0x09, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a,
+ 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x44,
+ 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c,
+ 0x59, 0x10, 0x04, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74,
+ 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75,
+ 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64,
+ 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_services_control_types_proto_rawDescOnce sync.Once
+ file_pkg_services_control_types_proto_rawDescData = file_pkg_services_control_types_proto_rawDesc
+)
+
+func file_pkg_services_control_types_proto_rawDescGZIP() []byte {
+ file_pkg_services_control_types_proto_rawDescOnce.Do(func() {
+ file_pkg_services_control_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_types_proto_rawDescData)
+ })
+ return file_pkg_services_control_types_proto_rawDescData
+}
+
+var file_pkg_services_control_types_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_pkg_services_control_types_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_pkg_services_control_types_proto_goTypes = []interface{}{
+ (NetmapStatus)(0), // 0: control.NetmapStatus
+ (HealthStatus)(0), // 1: control.HealthStatus
+ (ShardMode)(0), // 2: control.ShardMode
+ (*Signature)(nil), // 3: control.Signature
+ (*NodeInfo)(nil), // 4: control.NodeInfo
+ (*Netmap)(nil), // 5: control.Netmap
+ (*ShardInfo)(nil), // 6: control.ShardInfo
+ (*BlobstorInfo)(nil), // 7: control.BlobstorInfo
+ (*NodeInfo_Attribute)(nil), // 8: control.NodeInfo.Attribute
+}
+var file_pkg_services_control_types_proto_depIdxs = []int32{
+ 8, // 0: control.NodeInfo.attributes:type_name -> control.NodeInfo.Attribute
+ 0, // 1: control.NodeInfo.state:type_name -> control.NetmapStatus
+ 4, // 2: control.Netmap.nodes:type_name -> control.NodeInfo
+ 7, // 3: control.ShardInfo.blobstor:type_name -> control.BlobstorInfo
+ 2, // 4: control.ShardInfo.mode:type_name -> control.ShardMode
+ 5, // [5:5] is the sub-list for method output_type
+ 5, // [5:5] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_pkg_services_control_types_proto_init() }
+func file_pkg_services_control_types_proto_init() {
+ if File_pkg_services_control_types_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_services_control_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Signature); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NodeInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Netmap); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ShardInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BlobstorInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NodeInfo_Attribute); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_services_control_types_proto_rawDesc,
+ NumEnums: 3,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pkg_services_control_types_proto_goTypes,
+ DependencyIndexes: file_pkg_services_control_types_proto_depIdxs,
+ EnumInfos: file_pkg_services_control_types_proto_enumTypes,
+ MessageInfos: file_pkg_services_control_types_proto_msgTypes,
+ }.Build()
+ File_pkg_services_control_types_proto = out.File
+ file_pkg_services_control_types_proto_rawDesc = nil
+ file_pkg_services_control_types_proto_goTypes = nil
+ file_pkg_services_control_types_proto_depIdxs = nil
+}
diff --git a/pkg/services/control/types.proto b/pkg/services/control/types.proto
index d8135ed64..5b4844580 100644
--- a/pkg/services/control/types.proto
+++ b/pkg/services/control/types.proto
@@ -6,189 +6,163 @@ option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/con
// Signature of some message.
message Signature {
- // Public key used for signing.
- bytes key = 1 [ json_name = "key" ];
+ // Public key used for signing.
+ bytes key = 1 [json_name = "key"];
- // Binary signature.
- bytes sign = 2 [ json_name = "signature" ];
+ // Binary signature.
+ bytes sign = 2 [json_name = "signature"];
}
// Status of the storage node in the FrostFS network map.
enum NetmapStatus {
- // Undefined status, default value.
- STATUS_UNDEFINED = 0;
+ // Undefined status, default value.
+ STATUS_UNDEFINED = 0;
- // Node is online.
- ONLINE = 1;
+ // Node is online.
+ ONLINE = 1;
- // Node is offline.
- OFFLINE = 2;
+ // Node is offline.
+ OFFLINE = 2;
- // Node is maintained by the owner.
- MAINTENANCE = 3;
+ // Node is maintained by the owner.
+ MAINTENANCE = 3;
}
// FrostFS node description.
message NodeInfo {
- // Public key of the FrostFS node in a binary format.
- bytes public_key = 1 [ json_name = "publicKey" ];
+ // Public key of the FrostFS node in a binary format.
+ bytes public_key = 1 [json_name = "publicKey"];
- // Ways to connect to a node.
- repeated string addresses = 2 [ json_name = "addresses" ];
+ // Ways to connect to a node.
+ repeated string addresses = 2 [json_name = "addresses"];
- // Administrator-defined Attributes of the FrostFS Storage Node.
- //
- // `Attribute` is a Key-Value metadata pair. Key name must be a valid UTF-8
- // string. Value can't be empty.
- //
- // Node's attributes are mostly used during Storage Policy evaluation to
- // calculate object's placement and find a set of nodes satisfying policy
- // requirements. There are some "well-known" node attributes common to all the
- // Storage Nodes in the network and used implicitly with default values if not
- // explicitly set:
- //
- // * Capacity \
+ // Administrator-defined Attributes of the FrostFS Storage Node.
+ //
+ // `Attribute` is a Key-Value metadata pair. Key name must be a valid UTF-8
+ // string. Value can't be empty.
+ //
+ // Node's attributes are mostly used during Storage Policy evaluation to
+ // calculate object's placement and find a set of nodes satisfying policy
+ // requirements. There are some "well-known" node attributes common to all the
+ // Storage Nodes in the network and used implicitly with default values if not
+ // explicitly set:
+ //
+ // * Capacity \
// Total available disk space in Gigabytes.
- // * Price \
+ // * Price \
// Price in GAS tokens for storing one GB of data during one Epoch. In node
- // attributes it's a string presenting floating point number with comma or
- // point delimiter for decimal part. In the Network Map it will be saved as
- // 64-bit unsigned integer representing number of minimal token fractions.
- // * Locode \
+ // attributes it's a string presenting floating point number with comma or
+ // point delimiter for decimal part. In the Network Map it will be saved as
+ // 64-bit unsigned integer representing number of minimal token fractions.
+ // * Locode \
// Node's geographic location in
- // [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html)
- // format approximated to the nearest point defined in standard.
- // * Country \
+ // [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html)
+ // format approximated to the nearest point defined in standard.
+ // * Country \
// Country code in
- // [ISO 3166-1_alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
- // format. Calculated automatically from `Locode` attribute
- // * Region \
+ // [ISO 3166-1_alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
+ // format. Calculated automatically from `Locode` attribute
+ // * Region \
// Country's administative subdivision where node is located. Calculated
- // automatically from `Locode` attribute based on `SubDiv` field. Presented
- // in [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2) format.
- // * City \
+ // automatically from `Locode` attribute based on `SubDiv` field. Presented
+ // in [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2) format.
+ // * City \
// City, town, village or rural area name where node is located written
- // without diacritics . Calculated automatically from `Locode` attribute.
- //
- // For detailed description of each well-known attribute please see the
- // corresponding section in FrostFS Technical specification.
- message Attribute {
- // Key of the node attribute.
- string key = 1 [ json_name = "key" ];
+ // without diacritics . Calculated automatically from `Locode` attribute.
+ //
+ // For detailed description of each well-known attribute please see the
+ // corresponding section in FrostFS Technical specification.
+ message Attribute {
+ // Key of the node attribute.
+ string key = 1 [json_name = "key"];
- // Value of the node attribute.
- string value = 2 [ json_name = "value" ];
+ // Value of the node attribute.
+ string value = 2 [json_name = "value"];
- // Parent keys, if any. For example for `City` it could be `Region` and
- // `Country`.
- repeated string parents = 3 [ json_name = "parents" ];
- }
- // Carries list of the FrostFS node attributes in a key-value form. Key name
- // must be a node-unique valid UTF-8 string. Value can't be empty. NodeInfo
- // structures with duplicated attribute names or attributes with empty values
- // will be considered invalid.
- repeated Attribute attributes = 3 [ json_name = "attributes" ];
+ // Parent keys, if any. For example for `City` it could be `Region` and
+ // `Country`.
+ repeated string parents = 3 [json_name = "parents"];
+ }
+ // Carries list of the FrostFS node attributes in a key-value form. Key name
+ // must be a node-unique valid UTF-8 string. Value can't be empty. NodeInfo
+ // structures with duplicated attribute names or attributes with empty values
+ // will be considered invalid.
+ repeated Attribute attributes = 3 [json_name = "attributes"];
- // Carries state of the FrostFS node.
- NetmapStatus state = 4 [ json_name = "state" ];
+ // Carries state of the FrostFS node.
+ NetmapStatus state = 4 [json_name = "state"];
}
// Network map structure.
message Netmap {
- // Network map revision number.
- uint64 epoch = 1 [ json_name = "epoch" ];
+ // Network map revision number.
+ uint64 epoch = 1 [json_name = "epoch"];
- // Nodes presented in network.
- repeated NodeInfo nodes = 2 [ json_name = "nodes" ];
+ // Nodes presented in network.
+ repeated NodeInfo nodes = 2 [json_name = "nodes"];
}
// Health status of the storage node application.
enum HealthStatus {
- // Undefined status, default value.
- HEALTH_STATUS_UNDEFINED = 0;
+ // Undefined status, default value.
+ HEALTH_STATUS_UNDEFINED = 0;
- // Storage node application is starting.
- STARTING = 1;
+ // Storage node application is starting.
+ STARTING = 1;
- // Storage node application is started and serves all services.
- READY = 2;
+ // Storage node application is started and serves all services.
+ READY = 2;
- // Storage node application is shutting down.
- SHUTTING_DOWN = 3;
-
- // Storage node application is reconfiguring.
- RECONFIGURING = 4;
+ // Storage node application is shutting down.
+ SHUTTING_DOWN = 3;
}
// Shard description.
message ShardInfo {
- // ID of the shard.
- bytes shard_ID = 1 [ json_name = "shardID" ];
+ // ID of the shard.
+ bytes shard_ID = 1 [json_name = "shardID"];
- // Path to shard's metabase.
- string metabase_path = 2 [ json_name = "metabasePath" ];
+ // Path to shard's metabase.
+ string metabase_path = 2 [json_name = "metabasePath"];
- // Shard's blobstor info.
- repeated BlobstorInfo blobstor = 3 [ json_name = "blobstor" ];
+ // Shard's blobstor info.
+ repeated BlobstorInfo blobstor = 3 [json_name = "blobstor"];
- // Path to shard's write-cache, empty if disabled.
- string writecache_path = 4 [ json_name = "writecachePath" ];
+ // Path to shard's write-cache, empty if disabled.
+ string writecache_path = 4 [json_name = "writecachePath"];
- // Work mode of the shard.
- ShardMode mode = 5;
+ // Work mode of the shard.
+ ShardMode mode = 5;
- // Amount of errors occured.
- uint32 errorCount = 6;
+ // Amount of errors occured.
+ uint32 errorCount = 6;
- // Path to shard's pilorama storage.
- string pilorama_path = 7 [ json_name = "piloramaPath" ];
-
- // Evacuation status.
- bool evacuation_in_progress = 8 [ json_name = "evacuationInProgress" ];
+ // Path to shard's pilorama storage.
+ string pilorama_path = 7 [json_name = "piloramaPath"];
}
// Blobstor component description.
message BlobstorInfo {
- // Path to the root.
- string path = 1 [ json_name = "path" ];
- // Component type.
- string type = 2 [ json_name = "type" ];
+ // Path to the root.
+ string path = 1 [json_name = "path"];
+ // Component type.
+ string type = 2 [json_name = "type"];
}
// Work mode of the shard.
enum ShardMode {
- // Undefined mode, default value.
- SHARD_MODE_UNDEFINED = 0;
+ // Undefined mode, default value.
+ SHARD_MODE_UNDEFINED = 0;
- // Read-write.
- READ_WRITE = 1;
+ // Read-write.
+ READ_WRITE = 1;
- // Read-only.
- READ_ONLY = 2;
+ // Read-only.
+ READ_ONLY = 2;
- // Degraded.
- DEGRADED = 3;
+ // Degraded.
+ DEGRADED = 3;
- // DegradedReadOnly.
- DEGRADED_READ_ONLY = 4;
-}
-
-// ChainTarget is an object to which local overrides
-// are applied.
-message ChainTarget {
- enum TargetType {
- UNDEFINED = 0;
-
- NAMESPACE = 1;
-
- CONTAINER = 2;
-
- USER = 3;
-
- GROUP = 4;
- }
-
- TargetType type = 1;
-
- string Name = 2;
+ // DegradedReadOnly.
+ DEGRADED_READ_ONLY = 4;
}
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index 69d87292d..ab246dbdc 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -2,149 +2,7 @@
package control
-import (
- json "encoding/json"
- fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
- easyproto "github.com/VictoriaMetrics/easyproto"
- jlexer "github.com/mailru/easyjson/jlexer"
- jwriter "github.com/mailru/easyjson/jwriter"
- strconv "strconv"
-)
-
-type NetmapStatus int32
-
-const (
- NetmapStatus_STATUS_UNDEFINED NetmapStatus = 0
- NetmapStatus_ONLINE NetmapStatus = 1
- NetmapStatus_OFFLINE NetmapStatus = 2
- NetmapStatus_MAINTENANCE NetmapStatus = 3
-)
-
-var (
- NetmapStatus_name = map[int32]string{
- 0: "STATUS_UNDEFINED",
- 1: "ONLINE",
- 2: "OFFLINE",
- 3: "MAINTENANCE",
- }
- NetmapStatus_value = map[string]int32{
- "STATUS_UNDEFINED": 0,
- "ONLINE": 1,
- "OFFLINE": 2,
- "MAINTENANCE": 3,
- }
-)
-
-func (x NetmapStatus) String() string {
- if v, ok := NetmapStatus_name[int32(x)]; ok {
- return v
- }
- return strconv.FormatInt(int64(x), 10)
-}
-func (x *NetmapStatus) FromString(s string) bool {
- if v, ok := NetmapStatus_value[s]; ok {
- *x = NetmapStatus(v)
- return true
- }
- return false
-}
-
-type HealthStatus int32
-
-const (
- HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
- HealthStatus_STARTING HealthStatus = 1
- HealthStatus_READY HealthStatus = 2
- HealthStatus_SHUTTING_DOWN HealthStatus = 3
- HealthStatus_RECONFIGURING HealthStatus = 4
-)
-
-var (
- HealthStatus_name = map[int32]string{
- 0: "HEALTH_STATUS_UNDEFINED",
- 1: "STARTING",
- 2: "READY",
- 3: "SHUTTING_DOWN",
- 4: "RECONFIGURING",
- }
- HealthStatus_value = map[string]int32{
- "HEALTH_STATUS_UNDEFINED": 0,
- "STARTING": 1,
- "READY": 2,
- "SHUTTING_DOWN": 3,
- "RECONFIGURING": 4,
- }
-)
-
-func (x HealthStatus) String() string {
- if v, ok := HealthStatus_name[int32(x)]; ok {
- return v
- }
- return strconv.FormatInt(int64(x), 10)
-}
-func (x *HealthStatus) FromString(s string) bool {
- if v, ok := HealthStatus_value[s]; ok {
- *x = HealthStatus(v)
- return true
- }
- return false
-}
-
-type ShardMode int32
-
-const (
- ShardMode_SHARD_MODE_UNDEFINED ShardMode = 0
- ShardMode_READ_WRITE ShardMode = 1
- ShardMode_READ_ONLY ShardMode = 2
- ShardMode_DEGRADED ShardMode = 3
- ShardMode_DEGRADED_READ_ONLY ShardMode = 4
-)
-
-var (
- ShardMode_name = map[int32]string{
- 0: "SHARD_MODE_UNDEFINED",
- 1: "READ_WRITE",
- 2: "READ_ONLY",
- 3: "DEGRADED",
- 4: "DEGRADED_READ_ONLY",
- }
- ShardMode_value = map[string]int32{
- "SHARD_MODE_UNDEFINED": 0,
- "READ_WRITE": 1,
- "READ_ONLY": 2,
- "DEGRADED": 3,
- "DEGRADED_READ_ONLY": 4,
- }
-)
-
-func (x ShardMode) String() string {
- if v, ok := ShardMode_name[int32(x)]; ok {
- return v
- }
- return strconv.FormatInt(int64(x), 10)
-}
-func (x *ShardMode) FromString(s string) bool {
- if v, ok := ShardMode_value[s]; ok {
- *x = ShardMode(v)
- return true
- }
- return false
-}
-
-type Signature struct {
- Key []byte `json:"key"`
- Sign []byte `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*Signature)(nil)
- _ encoding.ProtoUnmarshaler = (*Signature)(nil)
- _ json.Marshaler = (*Signature)(nil)
- _ json.Unmarshaler = (*Signature)(nil)
-)
+import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
// StableSize returns the size of x in protobuf format.
//
@@ -158,186 +16,27 @@ func (x *Signature) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *Signature) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *Signature) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.Key) != 0 {
- mm.AppendBytes(1, x.Key)
- }
- if len(x.Sign) != 0 {
- mm.AppendBytes(2, x.Sign)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.Key)
+ offset += proto.BytesMarshal(2, buf[offset:], x.Sign)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *Signature) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "Signature")
- }
- switch fc.FieldNum {
- case 1: // Key
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Key")
- }
- x.Key = data
- case 2: // Sign
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Sign")
- }
- x.Sign = data
- }
- }
- return nil
-}
-func (x *Signature) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-func (x *Signature) SetKey(v []byte) {
- x.Key = v
-}
-func (x *Signature) GetSign() []byte {
- if x != nil {
- return x.Sign
- }
- return nil
-}
-func (x *Signature) SetSign(v []byte) {
- x.Sign = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *Signature) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
- if x.Key != nil {
- out.Base64Bytes(x.Key)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- if x.Sign != nil {
- out.Base64Bytes(x.Sign)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *Signature) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "key":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Key = f
- }
- case "signature":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Sign = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type NodeInfo_Attribute struct {
- Key string `json:"key"`
- Value string `json:"value"`
- Parents []string `json:"parents"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*NodeInfo_Attribute)(nil)
- _ encoding.ProtoUnmarshaler = (*NodeInfo_Attribute)(nil)
- _ json.Marshaler = (*NodeInfo_Attribute)(nil)
- _ json.Unmarshaler = (*NodeInfo_Attribute)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -351,215 +50,28 @@ func (x *NodeInfo_Attribute) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *NodeInfo_Attribute) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *NodeInfo_Attribute) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *NodeInfo_Attribute) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.Key) != 0 {
- mm.AppendString(1, x.Key)
- }
- if len(x.Value) != 0 {
- mm.AppendString(2, x.Value)
- }
- for j := range x.Parents {
- mm.AppendString(3, x.Parents[j])
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.StringMarshal(1, buf[offset:], x.Key)
+ offset += proto.StringMarshal(2, buf[offset:], x.Value)
+ offset += proto.RepeatedStringMarshal(3, buf[offset:], x.Parents)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *NodeInfo_Attribute) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "NodeInfo_Attribute")
- }
- switch fc.FieldNum {
- case 1: // Key
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Key")
- }
- x.Key = data
- case 2: // Value
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Value")
- }
- x.Value = data
- case 3: // Parents
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Parents")
- }
- x.Parents = append(x.Parents, data)
- }
- }
- return nil
-}
-func (x *NodeInfo_Attribute) GetKey() string {
- if x != nil {
- return x.Key
- }
- return ""
-}
-func (x *NodeInfo_Attribute) SetKey(v string) {
- x.Key = v
-}
-func (x *NodeInfo_Attribute) GetValue() string {
- if x != nil {
- return x.Value
- }
- return ""
-}
-func (x *NodeInfo_Attribute) SetValue(v string) {
- x.Value = v
-}
-func (x *NodeInfo_Attribute) GetParents() []string {
- if x != nil {
- return x.Parents
- }
- return nil
-}
-func (x *NodeInfo_Attribute) SetParents(v []string) {
- x.Parents = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *NodeInfo_Attribute) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *NodeInfo_Attribute) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
- out.String(x.Key)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"value\":"
- out.RawString(prefix)
- out.String(x.Value)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parents\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Parents {
- if i != 0 {
- out.RawByte(',')
- }
- out.String(x.Parents[i])
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *NodeInfo_Attribute) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *NodeInfo_Attribute) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "key":
- {
- var f string
- f = in.String()
- x.Key = f
- }
- case "value":
- {
- var f string
- f = in.String()
- x.Value = f
- }
- case "parents":
- {
- var f string
- var list []string
- in.Delim('[')
- for !in.IsDelim(']') {
- f = in.String()
- list = append(list, f)
- in.WantComma()
- }
- x.Parents = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type NodeInfo struct {
- PublicKey []byte `json:"publicKey"`
- Addresses []string `json:"addresses"`
- Attributes []NodeInfo_Attribute `json:"attributes"`
- State NetmapStatus `json:"state"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*NodeInfo)(nil)
- _ encoding.ProtoUnmarshaler = (*NodeInfo)(nil)
- _ json.Marshaler = (*NodeInfo)(nil)
- _ json.Unmarshaler = (*NodeInfo)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -570,303 +82,37 @@ func (x *NodeInfo) StableSize() (size int) {
size += proto.BytesSize(1, x.PublicKey)
size += proto.RepeatedStringSize(2, x.Addresses)
for i := range x.Attributes {
- size += proto.NestedStructureSizeUnchecked(3, &x.Attributes[i])
+ size += proto.NestedStructureSize(3, x.Attributes[i])
}
size += proto.EnumSize(4, int32(x.State))
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *NodeInfo) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *NodeInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *NodeInfo) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.PublicKey) != 0 {
- mm.AppendBytes(1, x.PublicKey)
- }
- for j := range x.Addresses {
- mm.AppendString(2, x.Addresses[j])
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.PublicKey)
+ offset += proto.RepeatedStringMarshal(2, buf[offset:], x.Addresses)
for i := range x.Attributes {
- x.Attributes[i].EmitProtobuf(mm.AppendMessage(3))
- }
- if int32(x.State) != 0 {
- mm.AppendInt32(4, int32(x.State))
+ offset += proto.NestedStructureMarshal(3, buf[offset:], x.Attributes[i])
}
+ offset += proto.EnumMarshal(4, buf[offset:], int32(x.State))
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *NodeInfo) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "NodeInfo")
- }
- switch fc.FieldNum {
- case 1: // PublicKey
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "PublicKey")
- }
- x.PublicKey = data
- case 2: // Addresses
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Addresses")
- }
- x.Addresses = append(x.Addresses, data)
- case 3: // Attributes
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Attributes")
- }
- x.Attributes = append(x.Attributes, NodeInfo_Attribute{})
- ff := &x.Attributes[len(x.Attributes)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 4: // State
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "State")
- }
- x.State = NetmapStatus(data)
- }
- }
- return nil
-}
-func (x *NodeInfo) GetPublicKey() []byte {
- if x != nil {
- return x.PublicKey
- }
- return nil
-}
-func (x *NodeInfo) SetPublicKey(v []byte) {
- x.PublicKey = v
-}
-func (x *NodeInfo) GetAddresses() []string {
- if x != nil {
- return x.Addresses
- }
- return nil
-}
-func (x *NodeInfo) SetAddresses(v []string) {
- x.Addresses = v
-}
-func (x *NodeInfo) GetAttributes() []NodeInfo_Attribute {
- if x != nil {
- return x.Attributes
- }
- return nil
-}
-func (x *NodeInfo) SetAttributes(v []NodeInfo_Attribute) {
- x.Attributes = v
-}
-func (x *NodeInfo) GetState() NetmapStatus {
- if x != nil {
- return x.State
- }
- return 0
-}
-func (x *NodeInfo) SetState(v NetmapStatus) {
- x.State = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *NodeInfo) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"publicKey\":"
- out.RawString(prefix)
- if x.PublicKey != nil {
- out.Base64Bytes(x.PublicKey)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"addresses\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Addresses {
- if i != 0 {
- out.RawByte(',')
- }
- out.String(x.Addresses[i])
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"attributes\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Attributes {
- if i != 0 {
- out.RawByte(',')
- }
- x.Attributes[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"state\":"
- out.RawString(prefix)
- v := int32(x.State)
- if vv, ok := NetmapStatus_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *NodeInfo) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "publicKey":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.PublicKey = f
- }
- case "addresses":
- {
- var f string
- var list []string
- in.Delim('[')
- for !in.IsDelim(']') {
- f = in.String()
- list = append(list, f)
- in.WantComma()
- }
- x.Addresses = list
- in.Delim(']')
- }
- case "attributes":
- {
- var f NodeInfo_Attribute
- var list []NodeInfo_Attribute
- in.Delim('[')
- for !in.IsDelim(']') {
- f = NodeInfo_Attribute{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Attributes = list
- in.Delim(']')
- }
- case "state":
- {
- var f NetmapStatus
- var parsedValue NetmapStatus
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := NetmapStatus_value[v]; ok {
- parsedValue = NetmapStatus(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = NetmapStatus(vv)
- case float64:
- parsedValue = NetmapStatus(v)
- }
- f = parsedValue
- x.State = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type Netmap struct {
- Epoch uint64 `json:"epoch"`
- Nodes []NodeInfo `json:"nodes"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*Netmap)(nil)
- _ encoding.ProtoUnmarshaler = (*Netmap)(nil)
- _ json.Marshaler = (*Netmap)(nil)
- _ json.Unmarshaler = (*Netmap)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -876,205 +122,34 @@ func (x *Netmap) StableSize() (size int) {
}
size += proto.UInt64Size(1, x.Epoch)
for i := range x.Nodes {
- size += proto.NestedStructureSizeUnchecked(2, &x.Nodes[i])
+ size += proto.NestedStructureSize(2, x.Nodes[i])
}
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *Netmap) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *Netmap) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *Netmap) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if x.Epoch != 0 {
- mm.AppendUint64(1, x.Epoch)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.UInt64Marshal(1, buf[offset:], x.Epoch)
for i := range x.Nodes {
- x.Nodes[i].EmitProtobuf(mm.AppendMessage(2))
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Nodes[i])
}
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *Netmap) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "Netmap")
- }
- switch fc.FieldNum {
- case 1: // Epoch
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Epoch")
- }
- x.Epoch = data
- case 2: // Nodes
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Nodes")
- }
- x.Nodes = append(x.Nodes, NodeInfo{})
- ff := &x.Nodes[len(x.Nodes)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *Netmap) GetEpoch() uint64 {
- if x != nil {
- return x.Epoch
- }
- return 0
-}
-func (x *Netmap) SetEpoch(v uint64) {
- x.Epoch = v
-}
-func (x *Netmap) GetNodes() []NodeInfo {
- if x != nil {
- return x.Nodes
- }
- return nil
-}
-func (x *Netmap) SetNodes(v []NodeInfo) {
- x.Nodes = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *Netmap) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *Netmap) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"epoch\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodes\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Nodes {
- if i != 0 {
- out.RawByte(',')
- }
- x.Nodes[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *Netmap) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "epoch":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.Epoch = f
- }
- case "nodes":
- {
- var f NodeInfo
- var list []NodeInfo
- in.Delim('[')
- for !in.IsDelim(']') {
- f = NodeInfo{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Nodes = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ShardInfo struct {
- Shard_ID []byte `json:"shardID"`
- MetabasePath string `json:"metabasePath"`
- Blobstor []BlobstorInfo `json:"blobstor"`
- WritecachePath string `json:"writecachePath"`
- Mode ShardMode `json:"mode"`
- ErrorCount uint32 `json:"errorCount"`
- PiloramaPath string `json:"piloramaPath"`
- EvacuationInProgress bool `json:"evacuationInProgress"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ShardInfo)(nil)
- _ encoding.ProtoUnmarshaler = (*ShardInfo)(nil)
- _ json.Marshaler = (*ShardInfo)(nil)
- _ json.Unmarshaler = (*ShardInfo)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1085,437 +160,43 @@ func (x *ShardInfo) StableSize() (size int) {
size += proto.BytesSize(1, x.Shard_ID)
size += proto.StringSize(2, x.MetabasePath)
for i := range x.Blobstor {
- size += proto.NestedStructureSizeUnchecked(3, &x.Blobstor[i])
+ size += proto.NestedStructureSize(3, x.Blobstor[i])
}
size += proto.StringSize(4, x.WritecachePath)
size += proto.EnumSize(5, int32(x.Mode))
size += proto.UInt32Size(6, x.ErrorCount)
size += proto.StringSize(7, x.PiloramaPath)
- size += proto.BoolSize(8, x.EvacuationInProgress)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ShardInfo) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *ShardInfo) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.Shard_ID) != 0 {
- mm.AppendBytes(1, x.Shard_ID)
- }
- if len(x.MetabasePath) != 0 {
- mm.AppendString(2, x.MetabasePath)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
+ offset += proto.StringMarshal(2, buf[offset:], x.MetabasePath)
for i := range x.Blobstor {
- x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3))
- }
- if len(x.WritecachePath) != 0 {
- mm.AppendString(4, x.WritecachePath)
- }
- if int32(x.Mode) != 0 {
- mm.AppendInt32(5, int32(x.Mode))
- }
- if x.ErrorCount != 0 {
- mm.AppendUint32(6, x.ErrorCount)
- }
- if len(x.PiloramaPath) != 0 {
- mm.AppendString(7, x.PiloramaPath)
- }
- if x.EvacuationInProgress {
- mm.AppendBool(8, x.EvacuationInProgress)
+ offset += proto.NestedStructureMarshal(3, buf[offset:], x.Blobstor[i])
}
+ offset += proto.StringMarshal(4, buf[offset:], x.WritecachePath)
+ offset += proto.EnumMarshal(5, buf[offset:], int32(x.Mode))
+ offset += proto.UInt32Marshal(6, buf[offset:], x.ErrorCount)
+ offset += proto.StringMarshal(7, buf[offset:], x.PiloramaPath)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ShardInfo) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ShardInfo")
- }
- switch fc.FieldNum {
- case 1: // Shard_ID
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
- }
- x.Shard_ID = data
- case 2: // MetabasePath
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "MetabasePath")
- }
- x.MetabasePath = data
- case 3: // Blobstor
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Blobstor")
- }
- x.Blobstor = append(x.Blobstor, BlobstorInfo{})
- ff := &x.Blobstor[len(x.Blobstor)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 4: // WritecachePath
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "WritecachePath")
- }
- x.WritecachePath = data
- case 5: // Mode
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Mode")
- }
- x.Mode = ShardMode(data)
- case 6: // ErrorCount
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ErrorCount")
- }
- x.ErrorCount = data
- case 7: // PiloramaPath
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "PiloramaPath")
- }
- x.PiloramaPath = data
- case 8: // EvacuationInProgress
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "EvacuationInProgress")
- }
- x.EvacuationInProgress = data
- }
- }
- return nil
-}
-func (x *ShardInfo) GetShard_ID() []byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-func (x *ShardInfo) SetShard_ID(v []byte) {
- x.Shard_ID = v
-}
-func (x *ShardInfo) GetMetabasePath() string {
- if x != nil {
- return x.MetabasePath
- }
- return ""
-}
-func (x *ShardInfo) SetMetabasePath(v string) {
- x.MetabasePath = v
-}
-func (x *ShardInfo) GetBlobstor() []BlobstorInfo {
- if x != nil {
- return x.Blobstor
- }
- return nil
-}
-func (x *ShardInfo) SetBlobstor(v []BlobstorInfo) {
- x.Blobstor = v
-}
-func (x *ShardInfo) GetWritecachePath() string {
- if x != nil {
- return x.WritecachePath
- }
- return ""
-}
-func (x *ShardInfo) SetWritecachePath(v string) {
- x.WritecachePath = v
-}
-func (x *ShardInfo) GetMode() ShardMode {
- if x != nil {
- return x.Mode
- }
- return 0
-}
-func (x *ShardInfo) SetMode(v ShardMode) {
- x.Mode = v
-}
-func (x *ShardInfo) GetErrorCount() uint32 {
- if x != nil {
- return x.ErrorCount
- }
- return 0
-}
-func (x *ShardInfo) SetErrorCount(v uint32) {
- x.ErrorCount = v
-}
-func (x *ShardInfo) GetPiloramaPath() string {
- if x != nil {
- return x.PiloramaPath
- }
- return ""
-}
-func (x *ShardInfo) SetPiloramaPath(v string) {
- x.PiloramaPath = v
-}
-func (x *ShardInfo) GetEvacuationInProgress() bool {
- if x != nil {
- return x.EvacuationInProgress
- }
- return false
-}
-func (x *ShardInfo) SetEvacuationInProgress(v bool) {
- x.EvacuationInProgress = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ShardInfo) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"shardID\":"
- out.RawString(prefix)
- if x.Shard_ID != nil {
- out.Base64Bytes(x.Shard_ID)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"metabasePath\":"
- out.RawString(prefix)
- out.String(x.MetabasePath)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"blobstor\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Blobstor {
- if i != 0 {
- out.RawByte(',')
- }
- x.Blobstor[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"writecachePath\":"
- out.RawString(prefix)
- out.String(x.WritecachePath)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"mode\":"
- out.RawString(prefix)
- v := int32(x.Mode)
- if vv, ok := ShardMode_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"errorCount\":"
- out.RawString(prefix)
- out.Uint32(x.ErrorCount)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"piloramaPath\":"
- out.RawString(prefix)
- out.String(x.PiloramaPath)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"evacuationInProgress\":"
- out.RawString(prefix)
- out.Bool(x.EvacuationInProgress)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ShardInfo) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "shardID":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Shard_ID = f
- }
- case "metabasePath":
- {
- var f string
- f = in.String()
- x.MetabasePath = f
- }
- case "blobstor":
- {
- var f BlobstorInfo
- var list []BlobstorInfo
- in.Delim('[')
- for !in.IsDelim(']') {
- f = BlobstorInfo{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Blobstor = list
- in.Delim(']')
- }
- case "writecachePath":
- {
- var f string
- f = in.String()
- x.WritecachePath = f
- }
- case "mode":
- {
- var f ShardMode
- var parsedValue ShardMode
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := ShardMode_value[v]; ok {
- parsedValue = ShardMode(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = ShardMode(vv)
- case float64:
- parsedValue = ShardMode(v)
- }
- f = parsedValue
- x.Mode = f
- }
- case "errorCount":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.ErrorCount = f
- }
- case "piloramaPath":
- {
- var f string
- f = in.String()
- x.PiloramaPath = f
- }
- case "evacuationInProgress":
- {
- var f bool
- f = in.Bool()
- x.EvacuationInProgress = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type BlobstorInfo struct {
- Path string `json:"path"`
- Type string `json:"type"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*BlobstorInfo)(nil)
- _ encoding.ProtoUnmarshaler = (*BlobstorInfo)(nil)
- _ json.Marshaler = (*BlobstorInfo)(nil)
- _ json.Unmarshaler = (*BlobstorInfo)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1528,382 +209,23 @@ func (x *BlobstorInfo) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *BlobstorInfo) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *BlobstorInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if len(x.Path) != 0 {
- mm.AppendString(1, x.Path)
- }
- if len(x.Type) != 0 {
- mm.AppendString(2, x.Type)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *BlobstorInfo) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "BlobstorInfo")
- }
- switch fc.FieldNum {
- case 1: // Path
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Path")
- }
- x.Path = data
- case 2: // Type
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Type")
- }
- x.Type = data
- }
- }
- return nil
-}
-func (x *BlobstorInfo) GetPath() string {
- if x != nil {
- return x.Path
- }
- return ""
-}
-func (x *BlobstorInfo) SetPath(v string) {
- x.Path = v
-}
-func (x *BlobstorInfo) GetType() string {
- if x != nil {
- return x.Type
- }
- return ""
-}
-func (x *BlobstorInfo) SetType(v string) {
- x.Type = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *BlobstorInfo) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *BlobstorInfo) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"path\":"
- out.RawString(prefix)
- out.String(x.Path)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"type\":"
- out.RawString(prefix)
- out.String(x.Type)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *BlobstorInfo) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *BlobstorInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "path":
- {
- var f string
- f = in.String()
- x.Path = f
- }
- case "type":
- {
- var f string
- f = in.String()
- x.Type = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ChainTarget_TargetType int32
-
-const (
- ChainTarget_UNDEFINED ChainTarget_TargetType = 0
- ChainTarget_NAMESPACE ChainTarget_TargetType = 1
- ChainTarget_CONTAINER ChainTarget_TargetType = 2
- ChainTarget_USER ChainTarget_TargetType = 3
- ChainTarget_GROUP ChainTarget_TargetType = 4
-)
-
-var (
- ChainTarget_TargetType_name = map[int32]string{
- 0: "UNDEFINED",
- 1: "NAMESPACE",
- 2: "CONTAINER",
- 3: "USER",
- 4: "GROUP",
- }
- ChainTarget_TargetType_value = map[string]int32{
- "UNDEFINED": 0,
- "NAMESPACE": 1,
- "CONTAINER": 2,
- "USER": 3,
- "GROUP": 4,
- }
-)
-
-func (x ChainTarget_TargetType) String() string {
- if v, ok := ChainTarget_TargetType_name[int32(x)]; ok {
- return v
- }
- return strconv.FormatInt(int64(x), 10)
-}
-func (x *ChainTarget_TargetType) FromString(s string) bool {
- if v, ok := ChainTarget_TargetType_value[s]; ok {
- *x = ChainTarget_TargetType(v)
- return true
- }
- return false
-}
-
-type ChainTarget struct {
- Type ChainTarget_TargetType `json:"type"`
- Name string `json:"Name"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ChainTarget)(nil)
- _ encoding.ProtoUnmarshaler = (*ChainTarget)(nil)
- _ json.Marshaler = (*ChainTarget)(nil)
- _ json.Unmarshaler = (*ChainTarget)(nil)
-)
-
-// StableSize returns the size of x in protobuf format.
+// StableMarshal marshals x in protobuf binary format with stable field order.
//
-// Structures with the same field values have the same binary size.
-func (x *ChainTarget) StableSize() (size int) {
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *BlobstorInfo) StableMarshal(buf []byte) []byte {
if x == nil {
- return 0
- }
- size += proto.EnumSize(1, int32(x.Type))
- size += proto.StringSize(2, x.Name)
- return size
-}
-
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ChainTarget) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ChainTarget) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if int32(x.Type) != 0 {
- mm.AppendInt32(1, int32(x.Type))
- }
- if len(x.Name) != 0 {
- mm.AppendString(2, x.Name)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ChainTarget) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ChainTarget")
- }
- switch fc.FieldNum {
- case 1: // Type
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Type")
- }
- x.Type = ChainTarget_TargetType(data)
- case 2: // Name
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Name")
- }
- x.Name = data
- }
- }
- return nil
-}
-func (x *ChainTarget) GetType() ChainTarget_TargetType {
- if x != nil {
- return x.Type
- }
- return 0
-}
-func (x *ChainTarget) SetType(v ChainTarget_TargetType) {
- x.Type = v
-}
-func (x *ChainTarget) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-func (x *ChainTarget) SetName(v string) {
- x.Name = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ChainTarget) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ChainTarget) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"type\":"
- out.RawString(prefix)
- v := int32(x.Type)
- if vv, ok := ChainTarget_TargetType_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"Name\":"
- out.RawString(prefix)
- out.String(x.Name)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ChainTarget) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ChainTarget) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "type":
- {
- var f ChainTarget_TargetType
- var parsedValue ChainTarget_TargetType
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := ChainTarget_TargetType_value[v]; ok {
- parsedValue = ChainTarget_TargetType(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = ChainTarget_TargetType(vv)
- case float64:
- parsedValue = ChainTarget_TargetType(v)
- }
- f = parsedValue
- x.Type = f
- }
- case "Name":
- {
- var f string
- f = in.String()
- x.Name = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
+ return []byte{}
}
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.StringMarshal(1, buf[offset:], x.Path)
+ offset += proto.StringMarshal(2, buf[offset:], x.Type)
+ return buf
}
diff --git a/pkg/services/control/types_test.go b/pkg/services/control/types_test.go
new file mode 100644
index 000000000..1505a985c
--- /dev/null
+++ b/pkg/services/control/types_test.go
@@ -0,0 +1,150 @@
+package control_test
+
+import (
+ "bytes"
+ "path/filepath"
+ "strconv"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "github.com/google/uuid"
+)
+
+func TestNetmap_StableMarshal(t *testing.T) {
+ testStableMarshal(t, generateNetmap(), new(control.Netmap), func(m1, m2 protoMessage) bool {
+ return equalNetmaps(m1.(*control.Netmap), m2.(*control.Netmap))
+ })
+}
+
+func generateNetmap() *control.Netmap {
+ nm := new(control.Netmap)
+ nm.SetEpoch(13)
+
+ const nodeCount = 2
+
+ nodes := make([]*control.NodeInfo, 0, nodeCount)
+
+ for i := 0; i < nodeCount; i++ {
+ n := new(control.NodeInfo)
+ n.SetPublicKey(testData(33))
+ n.SetAddresses([]string{testString(), testString()})
+ n.SetState(control.NetmapStatus_ONLINE)
+
+ const attrCount = 2
+
+ attrs := make([]*control.NodeInfo_Attribute, 0, attrCount)
+
+ for j := 0; j < attrCount; j++ {
+ a := new(control.NodeInfo_Attribute)
+ a.SetKey(testString())
+ a.SetValue(testString())
+
+ const parentsCount = 2
+
+ parents := make([]string, 0, parentsCount)
+
+ for k := 0; k < parentsCount; k++ {
+ parents = append(parents, testString())
+ }
+
+ a.SetParents(parents)
+
+ attrs = append(attrs, a)
+ }
+
+ n.SetAttributes(attrs)
+
+ nodes = append(nodes, n)
+ }
+
+ nm.SetNodes(nodes)
+
+ return nm
+}
+
+func equalNetmaps(nm1, nm2 *control.Netmap) bool {
+ if nm1.GetEpoch() != nm2.GetEpoch() {
+ return false
+ }
+
+ n1, n2 := nm1.GetNodes(), nm2.GetNodes()
+
+ if len(n1) != len(n2) {
+ return false
+ }
+
+ for i := range n1 {
+ if !equalNodeInfos(n1[i], n2[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func equalNodeInfos(n1, n2 *control.NodeInfo) bool {
+ if !bytes.Equal(n1.GetPublicKey(), n2.GetPublicKey()) ||
+ n1.GetState() != n2.GetState() {
+ return false
+ }
+
+ na1, na2 := n1.GetAddresses(), n2.GetAddresses()
+
+ if len(na1) != len(na2) {
+ return false
+ }
+
+ for i := range na1 {
+ if na1[i] != na2[i] {
+ return false
+ }
+ }
+
+ a1, a2 := n1.GetAttributes(), n2.GetAttributes()
+
+ if len(a1) != len(a2) {
+ return false
+ }
+
+ for i := range a1 {
+ if a1[i].GetKey() != a2[i].GetKey() || a1[i].GetValue() != a2[i].GetValue() {
+ return false
+ }
+
+ p1, p2 := a1[i].GetParents(), a2[i].GetParents()
+
+ if len(p1) != len(p2) {
+ return false
+ }
+
+ for j := range p1 {
+ if p1[j] != p2[j] {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+func generateShardInfo(id int) *control.ShardInfo {
+ si := new(control.ShardInfo)
+
+ path := "/nice/dir/awesome/files/" + strconv.Itoa(id)
+
+ uid, _ := uuid.NewRandom()
+ bin, _ := uid.MarshalBinary()
+
+ si.SetID(bin)
+ si.SetMode(control.ShardMode_READ_WRITE)
+ si.SetMetabasePath(filepath.Join(path, "meta"))
+ si.Blobstor = []*control.BlobstorInfo{
+ {Type: fstree.Type, Path: filepath.Join(path, "fstree")},
+ {Type: blobovniczatree.Type, Path: filepath.Join(path, "blobtree")}}
+ si.SetWriteCachePath(filepath.Join(path, "writecache"))
+ si.SetPiloramaPath(filepath.Join(path, "pilorama"))
+
+ return si
+}
diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go
index 1b92fdaad..d1e7a949e 100644
--- a/pkg/services/netmap/executor.go
+++ b/pkg/services/netmap/executor.go
@@ -5,11 +5,10 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
versionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)
@@ -27,9 +26,9 @@ type executorSvc struct {
// NodeState encapsulates information
// about current node state.
type NodeState interface {
- // LocalNodeInfo must return current node state
+ // Must return current node state
// in FrostFS API v2 NodeInfo structure.
- LocalNodeInfo() *netmapSDK.NodeInfo
+ LocalNodeInfo() (*netmap.NodeInfo, error)
// ReadCurrentNetMap reads current local network map of the storage node
// into the given parameter. Returns any error encountered which prevented
@@ -40,19 +39,17 @@ type NodeState interface {
// NetworkInfo encapsulates source of the
// recent information about the FrostFS network.
type NetworkInfo interface {
- // Dump must return recent network information in FrostFS API v2 NetworkInfo structure.
+ // Must return recent network information in FrostFS API v2 NetworkInfo structure.
//
// If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset.
- Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error)
+ Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
}
func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server {
- // this should never happen, otherwise it's a programmer's bug
- msg := "BUG: can't create netmap execution service"
- assert.False(s == nil, msg, "node state is nil")
- assert.False(netInfo == nil, msg, "network info is nil")
- assert.False(respSvc == nil, msg, "response service is nil")
- assert.True(version.IsValid(v), msg, "invalid version")
+ if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil {
+ // this should never happen, otherwise it's a programmer's bug
+ panic("can't create netmap execution service")
+ }
res := &executorSvc{
state: s,
@@ -67,15 +64,38 @@ func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo,
func (s *executorSvc) LocalNodeInfo(
_ context.Context,
- _ *netmap.LocalNodeInfoRequest,
-) (*netmap.LocalNodeInfoResponse, error) {
- ni := s.state.LocalNodeInfo()
- var nodeInfo netmap.NodeInfo
- ni.WriteToV2(&nodeInfo)
+ req *netmap.LocalNodeInfoRequest) (*netmap.LocalNodeInfoResponse, error) {
+ verV2 := req.GetMetaHeader().GetVersion()
+ if verV2 == nil {
+ return nil, errors.New("missing version")
+ }
+
+ var ver versionsdk.Version
+ if err := ver.ReadFromV2(*verV2); err != nil {
+ return nil, fmt.Errorf("can't read version: %w", err)
+ }
+
+ ni, err := s.state.LocalNodeInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ if addrNum := ni.NumberOfAddresses(); addrNum > 0 && ver.Minor() <= 7 {
+ ni2 := new(netmap.NodeInfo)
+ ni2.SetPublicKey(ni.GetPublicKey())
+ ni2.SetState(ni.GetState())
+ ni2.SetAttributes(ni.GetAttributes())
+ ni.IterateAddresses(func(s string) bool {
+ ni2.SetAddresses(s)
+ return true
+ })
+
+ ni = ni2
+ }
body := new(netmap.LocalNodeInfoResponseBody)
body.SetVersion(&s.version)
- body.SetNodeInfo(&nodeInfo)
+ body.SetNodeInfo(ni)
resp := new(netmap.LocalNodeInfoResponse)
resp.SetBody(body)
@@ -85,9 +105,8 @@ func (s *executorSvc) LocalNodeInfo(
}
func (s *executorSvc) NetworkInfo(
- ctx context.Context,
- req *netmap.NetworkInfoRequest,
-) (*netmap.NetworkInfoResponse, error) {
+ _ context.Context,
+ req *netmap.NetworkInfoRequest) (*netmap.NetworkInfoResponse, error) {
verV2 := req.GetMetaHeader().GetVersion()
if verV2 == nil {
return nil, errors.New("missing protocol version in meta header")
@@ -98,7 +117,7 @@ func (s *executorSvc) NetworkInfo(
return nil, fmt.Errorf("can't read version: %w", err)
}
- ni, err := s.netInfo.Dump(ctx, ver)
+ ni, err := s.netInfo.Dump(ver)
if err != nil {
return nil, err
}
diff --git a/pkg/services/netmap/server.go b/pkg/services/netmap/server.go
index eff880dbe..0a09c9f44 100644
--- a/pkg/services/netmap/server.go
+++ b/pkg/services/netmap/server.go
@@ -3,7 +3,7 @@ package netmap
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
)
// Server is an interface of the FrostFS API Netmap service server.
diff --git a/pkg/services/netmap/sign.go b/pkg/services/netmap/sign.go
index 5f184d5c0..305d3443e 100644
--- a/pkg/services/netmap/sign.go
+++ b/pkg/services/netmap/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
)
type signService struct {
@@ -23,8 +23,7 @@ func NewSignService(key *ecdsa.PrivateKey, svc Server) Server {
func (s *signService) LocalNodeInfo(
ctx context.Context,
- req *netmap.LocalNodeInfoRequest,
-) (*netmap.LocalNodeInfoResponse, error) {
+ req *netmap.LocalNodeInfoRequest) (*netmap.LocalNodeInfoResponse, error) {
if err := s.sigSvc.VerifyRequest(req); err != nil {
resp := new(netmap.LocalNodeInfoResponse)
return resp, s.sigSvc.SignResponse(resp, err)
diff --git a/pkg/services/notificator/deps.go b/pkg/services/notificator/deps.go
new file mode 100644
index 000000000..d6330f788
--- /dev/null
+++ b/pkg/services/notificator/deps.go
@@ -0,0 +1,22 @@
+package notificator
+
+import (
+ "context"
+
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+// NotificationSource is a source of object notifications.
+type NotificationSource interface {
+ // Iterate must iterate over all notifications for the
+ // provided epoch and call handler for all of them.
+ Iterate(ctx context.Context, epoch uint64, handler func(topic string, addr oid.Address))
+}
+
+// NotificationWriter notifies all the subscribers
+// about new object notifications.
+type NotificationWriter interface {
+ // Notify must notify about an event generated
+ // from an object with a specific topic.
+ Notify(topic string, address oid.Address)
+}
diff --git a/pkg/services/notificator/nats/options.go b/pkg/services/notificator/nats/options.go
new file mode 100644
index 000000000..c9ba2ed26
--- /dev/null
+++ b/pkg/services/notificator/nats/options.go
@@ -0,0 +1,38 @@
+package nats
+
+import (
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "github.com/nats-io/nats.go"
+)
+
+func WithClientCert(certPath, keyPath string) Option {
+ return func(o *opts) {
+ o.nOpts = append(o.nOpts, nats.ClientCert(certPath, keyPath))
+ }
+}
+
+func WithRootCA(paths ...string) Option {
+ return func(o *opts) {
+ o.nOpts = append(o.nOpts, nats.RootCAs(paths...))
+ }
+}
+
+func WithTimeout(timeout time.Duration) Option {
+ return func(o *opts) {
+ o.nOpts = append(o.nOpts, nats.Timeout(timeout))
+ }
+}
+
+func WithConnectionName(name string) Option {
+ return func(o *opts) {
+ o.nOpts = append(o.nOpts, nats.Name(name))
+ }
+}
+
+func WithLogger(logger *logger.Logger) Option {
+ return func(o *opts) {
+ o.log = logger
+ }
+}
diff --git a/pkg/services/notificator/nats/service.go b/pkg/services/notificator/nats/service.go
new file mode 100644
index 000000000..7f525df95
--- /dev/null
+++ b/pkg/services/notificator/nats/service.go
@@ -0,0 +1,129 @@
+package nats
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nats-io/nats.go"
+ "go.uber.org/zap"
+)
+
+// Writer is a NATS object notification writer.
+// It handles NATS JetStream connections and allows
+// sending string representation of the address to
+// the NATS server.
+//
+// For correct operation, the Writer must be created via the New function.
+// new(Writer) or Writer{} construction leads to undefined
+// behaviour and is not safe.
+type Writer struct {
+ js nats.JetStreamContext
+ nc *nats.Conn
+
+ m sync.RWMutex
+ createdStreams map[string]struct{}
+ opts
+}
+
+type opts struct {
+ log *logger.Logger
+ nOpts []nats.Option
+}
+
+type Option func(*opts)
+
+var errConnIsClosed = errors.New("connection to the server is closed")
+
+// Notify sends object address's string representation to the provided topic.
+// Uses first 4 bytes of object ID as a message ID to support 'exactly once'
+// message delivery.
+//
+// Returns error only if:
+// 1. underlying connection was closed and has not been established again;
+// 2. NATS server could not respond that it has saved the message.
+func (n *Writer) Notify(topic string, address oid.Address) error {
+ if !n.nc.IsConnected() {
+ return errConnIsClosed
+ }
+
+ // use first 4 bytes of the encoded string as
+ // message ID for the 'exactly once' delivery
+ messageID := address.Object().EncodeToString()[:4]
+
+ // check if the stream was previously created
+ n.m.RLock()
+ _, created := n.createdStreams[topic]
+ n.m.RUnlock()
+
+ if !created {
+ _, err := n.js.AddStream(&nats.StreamConfig{
+ Name: topic,
+ })
+ if err != nil {
+ return fmt.Errorf("could not add stream: %w", err)
+ }
+
+ n.m.Lock()
+ n.createdStreams[topic] = struct{}{}
+ n.m.Unlock()
+ }
+
+ _, err := n.js.Publish(topic, []byte(address.EncodeToString()), nats.MsgId(messageID))
+ return err
+}
+
+// New creates new Writer.
+func New(oo ...Option) *Writer {
+ w := &Writer{
+ createdStreams: make(map[string]struct{}),
+ opts: opts{
+ log: &logger.Logger{Logger: zap.L()},
+ nOpts: make([]nats.Option, 0, len(oo)+3),
+ },
+ }
+
+ for _, o := range oo {
+ o(&w.opts)
+ }
+
+ w.opts.nOpts = append(w.opts.nOpts,
+ nats.NoCallbacksAfterClientClose(), // do not invoke callbacks on a planned writer stop
+ nats.DisconnectErrHandler(func(conn *nats.Conn, err error) {
+ w.log.Error(logs.NatsNatsConnectionWasLost, zap.Error(err))
+ }),
+ nats.ReconnectHandler(func(conn *nats.Conn) {
+ w.log.Warn(logs.NatsNatsReconnectedToTheServer)
+ }),
+ )
+
+ return w
+}
+
+// Connect tries to connect to a specified NATS endpoint.
+//
+// Connection is closed when passed context is done.
+func (n *Writer) Connect(ctx context.Context, endpoint string) error {
+ nc, err := nats.Connect(endpoint, n.opts.nOpts...)
+ if err != nil {
+ return fmt.Errorf("could not connect to server: %w", err)
+ }
+
+ n.nc = nc
+
+ // usage w/o options is error-free
+ n.js, _ = nc.JetStream()
+
+ go func() {
+ <-ctx.Done()
+ n.opts.log.Info(logs.NatsNatsClosingConnectionAsTheContextIsDone)
+
+ nc.Close()
+ }()
+
+ return nil
+}
diff --git a/pkg/services/notificator/service.go b/pkg/services/notificator/service.go
new file mode 100644
index 000000000..bbf4e4823
--- /dev/null
+++ b/pkg/services/notificator/service.go
@@ -0,0 +1,88 @@
+package notificator
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+)
+
+// Prm groups Notificator constructor's
+// parameters. All are required.
+type Prm struct {
+ writer NotificationWriter
+ notificationSource NotificationSource
+ logger *logger.Logger
+}
+
+// SetLogger sets a logger.
+func (prm *Prm) SetLogger(v *logger.Logger) *Prm {
+ prm.logger = v
+ return prm
+}
+
+// SetWriter sets notification writer.
+func (prm *Prm) SetWriter(v NotificationWriter) *Prm {
+ prm.writer = v
+ return prm
+}
+
+// SetNotificationSource sets notification source.
+func (prm *Prm) SetNotificationSource(v NotificationSource) *Prm {
+ prm.notificationSource = v
+ return prm
+}
+
+// Notificator is a notification producer that handles
+// objects with defined notification epoch.
+//
+// Working client must be created via constructor New.
+// Using the Client that has been created with new(Client)
+// expression (or just declaring a Client variable) is unsafe
+// and can lead to panic.
+type Notificator struct {
+ w NotificationWriter
+ ns NotificationSource
+ l *logger.Logger
+}
+
+// New creates, initializes and returns the Notificator instance.
+//
+// Panics if any field of the passed Prm structure is not set/set
+// to nil.
+func New(prm *Prm) *Notificator {
+ panicOnNil := func(v any, name string) {
+ if v == nil {
+ panic(fmt.Sprintf("Notificator constructor: %s is nil\n", name))
+ }
+ }
+
+ panicOnNil(prm.writer, "NotificationWriter")
+ panicOnNil(prm.notificationSource, "NotificationSource")
+ panicOnNil(prm.logger, "Logger")
+
+ return &Notificator{
+ w: prm.writer,
+ ns: prm.notificationSource,
+ l: prm.logger,
+ }
+}
+
+// ProcessEpoch looks for all objects with defined epoch in the storage
+// and passes their addresses to the NotificationWriter.
+func (n *Notificator) ProcessEpoch(ctx context.Context, epoch uint64) {
+ logger := n.l.With(zap.Uint64("epoch", epoch))
+ logger.Debug(logs.NotificatorNotificatorStartProcessingObjectNotifications)
+
+ n.ns.Iterate(ctx, epoch, func(topic string, addr oid.Address) {
+ n.l.Debug(logs.NotificatorNotificatorProcessingObjectNotification,
+ zap.String("topic", topic),
+ zap.Stringer("address", addr),
+ )
+
+ n.w.Notify(topic, addr)
+ })
+}
diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go
new file mode 100644
index 000000000..921545c8b
--- /dev/null
+++ b/pkg/services/object/acl/acl.go
@@ -0,0 +1,262 @@
+package acl
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "errors"
+ "fmt"
+ "io"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ eaclV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/eacl/v2"
+ v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
+ bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+// Checker implements v2.ACLChecker interfaces and provides
+// ACL/eACL validation functionality.
+type Checker struct {
+ eaclSrc container.EACLSource
+ validator *eaclSDK.Validator
+ localStorage *engine.StorageEngine
+ state netmap.State
+}
+
+type localStorage struct {
+ ls *engine.StorageEngine
+}
+
+func (s *localStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ if s.ls == nil {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ return engine.Head(ctx, s.ls, addr)
+}
+
+// Various EACL check errors.
+var (
+ errEACLDeniedByRule = errors.New("denied by rule")
+ errBearerExpired = errors.New("bearer token has expired")
+ errBearerInvalidSignature = errors.New("bearer token has invalid signature")
+ errBearerInvalidContainerID = errors.New("bearer token was created for another container")
+ errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
+ errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
+)
+
+// NewChecker creates Checker.
+// All parameters are expected to be non-nil.
+func NewChecker(
+ state netmap.State,
+ eaclSrc container.EACLSource,
+ validator *eaclSDK.Validator,
+ localStorage *engine.StorageEngine,
+) *Checker {
+ return &Checker{
+ eaclSrc: eaclSrc,
+ validator: validator,
+ localStorage: localStorage,
+ state: state,
+ }
+}
+
+// CheckBasicACL is a main check function for basic ACL.
+func (c *Checker) CheckBasicACL(info v2.RequestInfo) bool {
+ // check basic ACL permissions
+ return info.BasicACL().IsOpAllowed(info.Operation(), info.RequestRole())
+}
+
+// StickyBitCheck validates owner field in the request if sticky bit is enabled.
+func (c *Checker) StickyBitCheck(info v2.RequestInfo, owner user.ID) bool {
+ // According to FrostFS specification sticky bit has no effect on system nodes
+ // for correct intra-container work with objects (in particular, replication).
+ if info.RequestRole() == acl.RoleContainer {
+ return true
+ }
+
+ if !info.BasicACL().Sticky() {
+ return true
+ }
+
+ if len(info.SenderKey()) == 0 {
+ return false
+ }
+
+ requestSenderKey := unmarshalPublicKey(info.SenderKey())
+
+ return isOwnerFromKey(owner, requestSenderKey)
+}
+
+// CheckEACL is a main check function for extended ACL.
+func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error {
+ basicACL := reqInfo.BasicACL()
+ if !basicACL.Extendable() {
+ return nil
+ }
+
+ bearerTok := reqInfo.Bearer()
+ impersonate := bearerTok != nil && bearerTok.Impersonate()
+
+ // if bearer token is not allowed, then ignore it
+ if impersonate || !basicACL.AllowedBearerRules(reqInfo.Operation()) {
+ reqInfo.CleanBearer()
+ }
+
+ var table eaclSDK.Table
+ cnr := reqInfo.ContainerID()
+
+ if bearerTok == nil {
+ eaclInfo, err := c.eaclSrc.GetEACL(cnr)
+ if err != nil {
+ if client.IsErrEACLNotFound(err) {
+ return nil
+ }
+ return err
+ }
+
+ table = *eaclInfo.Value
+ } else {
+ table = bearerTok.EACLTable()
+ }
+
+ // if bearer token is not present, isValidBearer returns nil (no error)
+ if err := isValidBearer(reqInfo, c.state); err != nil {
+ return err
+ }
+
+ hdrSrc, err := c.getHeaderSource(cnr, msg, reqInfo)
+ if err != nil {
+ return err
+ }
+
+ eaclRole := getRole(reqInfo)
+
+ action, _ := c.validator.CalculateAction(new(eaclSDK.ValidationUnit).
+ WithRole(eaclRole).
+ WithOperation(eaclSDK.Operation(reqInfo.Operation())).
+ WithContainerID(&cnr).
+ WithSenderKey(reqInfo.SenderKey()).
+ WithHeaderSource(hdrSrc).
+ WithEACLTable(&table),
+ )
+
+ if action != eaclSDK.ActionAllow {
+ return errEACLDeniedByRule
+ }
+ return nil
+}
+
+func getRole(reqInfo v2.RequestInfo) eaclSDK.Role {
+ var eaclRole eaclSDK.Role
+ switch op := reqInfo.RequestRole(); op {
+ default:
+ eaclRole = eaclSDK.Role(op)
+ case acl.RoleOwner:
+ eaclRole = eaclSDK.RoleUser
+ case acl.RoleInnerRing, acl.RoleContainer:
+ eaclRole = eaclSDK.RoleSystem
+ case acl.RoleOthers:
+ eaclRole = eaclSDK.RoleOthers
+ }
+ return eaclRole
+}
+
+func (c *Checker) getHeaderSource(cnr cid.ID, msg any, reqInfo v2.RequestInfo) (eaclSDK.TypedHeaderSource, error) {
+ var xHeaderSource eaclV2.XHeaderSource
+ if req, ok := msg.(eaclV2.Request); ok {
+ xHeaderSource = eaclV2.NewRequestXHeaderSource(req)
+ } else {
+ xHeaderSource = eaclV2.NewResponseXHeaderSource(msg.(eaclV2.Response), reqInfo.Request().(eaclV2.Request))
+ }
+
+ hdrSrc, err := eaclV2.NewMessageHeaderSource(&localStorage{ls: c.localStorage}, xHeaderSource, cnr, eaclV2.WithOID(reqInfo.ObjectID()))
+ if err != nil {
+ return nil, fmt.Errorf("can't parse headers: %w", err)
+ }
+ return hdrSrc, nil
+}
+
+// isValidBearer checks whether bearer token was correctly signed by authorized
+// entity. This method might be defined on whole ACL service because it will
+// require fetching current epoch to check lifetime.
+func isValidBearer(reqInfo v2.RequestInfo, st netmap.State) error {
+ ownerCnr := reqInfo.ContainerOwner()
+
+ token := reqInfo.Bearer()
+
+ // 0. Check if bearer token is present in reqInfo.
+ if token == nil {
+ return nil
+ }
+
+ // 1. First check token lifetime. Simplest verification.
+ if token.InvalidAt(st.CurrentEpoch()) {
+ return errBearerExpired
+ }
+
+ // 2. Then check if bearer token is signed correctly.
+ if !token.VerifySignature() {
+ return errBearerInvalidSignature
+ }
+
+ // 3. Then check if container is either empty or equal to the container in the request.
+ cnr, isSet := token.EACLTable().CID()
+ if isSet && !cnr.Equals(reqInfo.ContainerID()) {
+ return errBearerInvalidContainerID
+ }
+
+ // 4. Then check if container owner signed this token.
+ if !bearerSDK.ResolveIssuer(*token).Equals(ownerCnr) {
+ // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
+ return errBearerNotSignedByOwner
+ }
+
+ // 5. Then check if request sender has rights to use this token.
+ var keySender frostfsecdsa.PublicKey
+
+ err := keySender.Decode(reqInfo.SenderKey())
+ if err != nil {
+ return fmt.Errorf("decode sender public key: %w", err)
+ }
+
+ var usrSender user.ID
+ user.IDFromKey(&usrSender, ecdsa.PublicKey(keySender))
+
+ if !token.AssertUser(usrSender) {
+ // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again
+ return errBearerInvalidOwner
+ }
+
+ return nil
+}
+
+func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
+ if key == nil {
+ return false
+ }
+
+ var id2 user.ID
+ user.IDFromKey(&id2, (ecdsa.PublicKey)(*key))
+
+ return id.Equals(id2)
+}
+
+func unmarshalPublicKey(bs []byte) *keys.PublicKey {
+ pub, err := keys.NewPublicKeyFromBytes(bs, elliptic.P256())
+ if err != nil {
+ return nil
+ }
+ return pub
+}
diff --git a/pkg/services/object/acl/acl_test.go b/pkg/services/object/acl/acl_test.go
new file mode 100644
index 000000000..b9b82dac8
--- /dev/null
+++ b/pkg/services/object/acl/acl_test.go
@@ -0,0 +1,89 @@
+package acl
+
+import (
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "github.com/stretchr/testify/require"
+)
+
+type emptyEACLSource struct{}
+
+func (e emptyEACLSource) GetEACL(_ cid.ID) (*container.EACL, error) {
+ return nil, nil
+}
+
+type emptyNetmapState struct{}
+
+func (e emptyNetmapState) CurrentEpoch() uint64 {
+ return 0
+}
+
+func TestStickyCheck(t *testing.T) {
+ checker := NewChecker(
+ emptyNetmapState{},
+ emptyEACLSource{},
+ eaclSDK.NewValidator(),
+ &engine.StorageEngine{})
+
+ t.Run("system role", func(t *testing.T) {
+ var info v2.RequestInfo
+
+ info.SetSenderKey(make([]byte, 33)) // any non-empty key
+ info.SetRequestRole(acl.RoleContainer)
+
+ require.True(t, checker.StickyBitCheck(info, *usertest.ID()))
+
+ var basicACL acl.Basic
+ basicACL.MakeSticky()
+
+ info.SetBasicACL(basicACL)
+
+ require.True(t, checker.StickyBitCheck(info, *usertest.ID()))
+ })
+
+ t.Run("owner ID and/or public key emptiness", func(t *testing.T) {
+ var info v2.RequestInfo
+
+ info.SetRequestRole(acl.RoleOthers) // should be non-system role
+
+ assertFn := func(isSticky, withKey, withOwner, expected bool) {
+ info := info
+ if isSticky {
+ var basicACL acl.Basic
+ basicACL.MakeSticky()
+
+ info.SetBasicACL(basicACL)
+ }
+
+ if withKey {
+ info.SetSenderKey(make([]byte, 33))
+ } else {
+ info.SetSenderKey(nil)
+ }
+
+ var ownerID user.ID
+
+ if withOwner {
+ ownerID = *usertest.ID()
+ }
+
+ require.Equal(t, expected, checker.StickyBitCheck(info, ownerID))
+ }
+
+ assertFn(true, false, false, false)
+ assertFn(true, true, false, false)
+ assertFn(true, false, true, false)
+ assertFn(false, false, false, true)
+ assertFn(false, true, false, true)
+ assertFn(false, false, true, true)
+ assertFn(false, true, true, true)
+ })
+}
diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go
new file mode 100644
index 000000000..023b99239
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/eacl_test.go
@@ -0,0 +1,166 @@
+package v2
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "testing"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/stretchr/testify/require"
+)
+
+type testLocalStorage struct {
+ t *testing.T
+
+ expAddr oid.Address
+
+ obj *objectSDK.Object
+
+ err error
+}
+
+func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
+ require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
+
+ return s.obj, s.err
+}
+
+func testXHeaders(strs ...string) []session.XHeader {
+ res := make([]session.XHeader, len(strs)/2)
+
+ for i := 0; i < len(strs); i += 2 {
+ res[i/2].SetKey(strs[i])
+ res[i/2].SetValue(strs[i+1])
+ }
+
+ return res
+}
+
+func TestHeadRequest(t *testing.T) {
+ req := new(objectV2.HeadRequest)
+
+ meta := new(session.RequestMetaHeader)
+ req.SetMetaHeader(meta)
+
+ body := new(objectV2.HeadRequestBody)
+ req.SetBody(body)
+
+ addr := oidtest.Address()
+
+ var addrV2 refs.Address
+ addr.WriteToV2(&addrV2)
+
+ body.SetAddress(&addrV2)
+
+ xKey := "x-key"
+ xVal := "x-val"
+ xHdrs := testXHeaders(
+ xKey, xVal,
+ )
+
+ meta.SetXHeaders(xHdrs)
+
+ obj := objectSDK.New()
+
+ attrKey := "attr_key"
+ attrVal := "attr_val"
+ var attr objectSDK.Attribute
+ attr.SetKey(attrKey)
+ attr.SetValue(attrVal)
+ obj.SetAttributes(attr)
+
+ table := new(eaclSDK.Table)
+
+ priv, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ senderKey := priv.PublicKey()
+
+ r := eaclSDK.NewRecord()
+ r.SetOperation(eaclSDK.OperationHead)
+ r.SetAction(eaclSDK.ActionDeny)
+ r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal)
+ r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal)
+ eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
+
+ table.AddRecord(r)
+
+ lStorage := &testLocalStorage{
+ t: t,
+ expAddr: addr,
+ obj: obj,
+ }
+
+ id := addr.Object()
+
+ newSource := func(t *testing.T) eaclSDK.TypedHeaderSource {
+ hdrSrc, err := NewMessageHeaderSource(
+ lStorage,
+ NewRequestXHeaderSource(req),
+ addr.Container(),
+ WithOID(&id))
+ require.NoError(t, err)
+ return hdrSrc
+ }
+
+ cnr := addr.Container()
+
+ unit := new(eaclSDK.ValidationUnit).
+ WithContainerID(&cnr).
+ WithOperation(eaclSDK.OperationHead).
+ WithSenderKey(senderKey.Bytes()).
+ WithEACLTable(table)
+
+ validator := eaclSDK.NewValidator()
+
+ checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t)))
+
+ meta.SetXHeaders(nil)
+
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+
+ meta.SetXHeaders(xHdrs)
+
+ obj.SetAttributes()
+
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+
+ lStorage.err = errors.New("any error")
+
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+
+ r.SetAction(eaclSDK.ActionAllow)
+
+ rID := eaclSDK.NewRecord()
+ rID.SetOperation(eaclSDK.OperationHead)
+ rID.SetAction(eaclSDK.ActionDeny)
+ rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object())
+ eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
+
+ table = eaclSDK.NewTable()
+ table.AddRecord(r)
+ table.AddRecord(rID)
+
+ unit.WithEACLTable(table)
+ checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
+}
+
+func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
+ actual, fromRule := v.CalculateAction(u)
+ require.True(t, fromRule)
+ require.Equal(t, expected, actual)
+}
+
+func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
+ actual, fromRule := v.CalculateAction(u)
+ require.False(t, fromRule)
+ require.Equal(t, eaclSDK.ActionAllow, actual)
+}
diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go
new file mode 100644
index 000000000..34975e1e6
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/headers.go
@@ -0,0 +1,246 @@
+package v2
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+type Option func(*cfg)
+
+type cfg struct {
+ storage ObjectStorage
+
+ msg XHeaderSource
+
+ cnr cid.ID
+ obj *oid.ID
+}
+
+type ObjectStorage interface {
+ Head(context.Context, oid.Address) (*objectSDK.Object, error)
+}
+
+type Request interface {
+ GetMetaHeader() *session.RequestMetaHeader
+}
+
+type Response interface {
+ GetMetaHeader() *session.ResponseMetaHeader
+}
+
+type headerSource struct {
+ requestHeaders []eaclSDK.Header
+ objectHeaders []eaclSDK.Header
+
+ incompleteObjectHeaders bool
+}
+
+func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) {
+ cfg := &cfg{
+ storage: os,
+ cnr: cnrID,
+ msg: xhs,
+ }
+
+ for i := range opts {
+ opts[i](cfg)
+ }
+
+ if cfg.msg == nil {
+ return nil, errors.New("message is not provided")
+ }
+
+ var res headerSource
+
+ err := cfg.readObjectHeaders(&res)
+ if err != nil {
+ return nil, err
+ }
+
+ res.requestHeaders = cfg.msg.GetXHeaders()
+
+ return res, nil
+}
+
+func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) {
+ switch typ {
+ default:
+ return nil, true
+ case eaclSDK.HeaderFromRequest:
+ return h.requestHeaders, true
+ case eaclSDK.HeaderFromObject:
+ return h.objectHeaders, !h.incompleteObjectHeaders
+ }
+}
+
+type xHeader session.XHeader
+
+func (x xHeader) Key() string {
+ return (*session.XHeader)(&x).GetKey()
+}
+
+func (x xHeader) Value() string {
+ return (*session.XHeader)(&x).GetValue()
+}
+
+var errMissingOID = errors.New("object ID is missing")
+
+func (h *cfg) readObjectHeaders(dst *headerSource) error {
+ switch m := h.msg.(type) {
+ default:
+ panic(fmt.Sprintf("unexpected message type %T", h.msg))
+ case requestXHeaderSource:
+ return h.readObjectHeadersFromRequestXHeaderSource(m, dst)
+ case responseXHeaderSource:
+ return h.readObjectHeadersResponseXHeaderSource(m, dst)
+ }
+}
+
+func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error {
+ switch req := m.req.(type) {
+ case
+ *objectV2.GetRequest,
+ *objectV2.HeadRequest:
+ if h.obj == nil {
+ return errMissingOID
+ }
+
+ objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
+
+ dst.objectHeaders = objHeaders
+ dst.incompleteObjectHeaders = !completed
+ case
+ *objectV2.GetRangeRequest,
+ *objectV2.GetRangeHashRequest,
+ *objectV2.DeleteRequest:
+ if h.obj == nil {
+ return errMissingOID
+ }
+
+ dst.objectHeaders = addressHeaders(h.cnr, h.obj)
+ case *objectV2.PutRequest:
+ if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
+ oV2 := new(objectV2.Object)
+ oV2.SetObjectID(v.GetObjectID())
+ oV2.SetHeader(v.GetHeader())
+
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
+ }
+ case *objectV2.PutSingleRequest:
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj)
+ case *objectV2.SearchRequest:
+ cnrV2 := req.GetBody().GetContainerID()
+ var cnr cid.ID
+
+ if cnrV2 != nil {
+ if err := cnr.ReadFromV2(*cnrV2); err != nil {
+ return fmt.Errorf("can't parse container ID: %w", err)
+ }
+ }
+
+ dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)}
+ }
+ return nil
+}
+
+func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error {
+ switch resp := m.resp.(type) {
+ default:
+ objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
+
+ dst.objectHeaders = objectHeaders
+ dst.incompleteObjectHeaders = !completed
+ case *objectV2.GetResponse:
+ if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
+ oV2 := new(objectV2.Object)
+ oV2.SetObjectID(v.GetObjectID())
+ oV2.SetHeader(v.GetHeader())
+
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
+ }
+ case *objectV2.HeadResponse:
+ oV2 := new(objectV2.Object)
+
+ var hdr *objectV2.Header
+
+ switch v := resp.GetBody().GetHeaderPart().(type) {
+ case *objectV2.ShortHeader:
+ hdr = new(objectV2.Header)
+
+ var idV2 refsV2.ContainerID
+ h.cnr.WriteToV2(&idV2)
+
+ hdr.SetContainerID(&idV2)
+ hdr.SetVersion(v.GetVersion())
+ hdr.SetCreationEpoch(v.GetCreationEpoch())
+ hdr.SetOwnerID(v.GetOwnerID())
+ hdr.SetObjectType(v.GetObjectType())
+ hdr.SetPayloadLength(v.GetPayloadLength())
+ case *objectV2.HeaderWithSignature:
+ hdr = v.GetHeader()
+ }
+
+ oV2.SetHeader(hdr)
+
+ dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
+ }
+ return nil
+}
+
+func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) {
+ if idObj != nil {
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(*idObj)
+
+ obj, err := h.storage.Head(context.TODO(), addr)
+ if err == nil {
+ return headersFromObject(obj, cnr, idObj), true
+ }
+ }
+
+ return addressHeaders(cnr, idObj), false
+}
+
+func cidHeader(idCnr cid.ID) sysObjHdr {
+ return sysObjHdr{
+ k: acl.FilterObjectContainerID,
+ v: idCnr.EncodeToString(),
+ }
+}
+
+func oidHeader(obj oid.ID) sysObjHdr {
+ return sysObjHdr{
+ k: acl.FilterObjectID,
+ v: obj.EncodeToString(),
+ }
+}
+
+func ownerIDHeader(ownerID user.ID) sysObjHdr {
+ return sysObjHdr{
+ k: acl.FilterObjectOwnerID,
+ v: ownerID.EncodeToString(),
+ }
+}
+
+func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
+ hh := make([]eaclSDK.Header, 0, 2)
+ hh = append(hh, cidHeader(cnr))
+
+ if oid != nil {
+ hh = append(hh, oidHeader(*oid))
+ }
+
+ return hh
+}
diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go
new file mode 100644
index 000000000..690e4aa70
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/object.go
@@ -0,0 +1,92 @@
+package v2
+
+import (
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+type sysObjHdr struct {
+ k, v string
+}
+
+func (s sysObjHdr) Key() string {
+ return s.k
+}
+
+func (s sysObjHdr) Value() string {
+ return s.v
+}
+
+func u64Value(v uint64) string {
+ return strconv.FormatUint(v, 10)
+}
+
+func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
+ var count int
+ for obj := obj; obj != nil; obj = obj.Parent() {
+ count += 9 + len(obj.Attributes())
+ }
+
+ res := make([]eaclSDK.Header, 0, count)
+ for ; obj != nil; obj = obj.Parent() {
+ res = append(res,
+ cidHeader(cnr),
+ // creation epoch
+ sysObjHdr{
+ k: acl.FilterObjectCreationEpoch,
+ v: u64Value(obj.CreationEpoch()),
+ },
+ // payload size
+ sysObjHdr{
+ k: acl.FilterObjectPayloadLength,
+ v: u64Value(obj.PayloadSize()),
+ },
+ // object version
+ sysObjHdr{
+ k: acl.FilterObjectVersion,
+ v: obj.Version().String(),
+ },
+ // object type
+ sysObjHdr{
+ k: acl.FilterObjectType,
+ v: obj.Type().String(),
+ },
+ )
+
+ if oid != nil {
+ res = append(res, oidHeader(*oid))
+ }
+
+ if idOwner := obj.OwnerID(); idOwner != nil {
+ res = append(res, ownerIDHeader(*idOwner))
+ }
+
+ cs, ok := obj.PayloadChecksum()
+ if ok {
+ res = append(res, sysObjHdr{
+ k: acl.FilterObjectPayloadHash,
+ v: cs.String(),
+ })
+ }
+
+ cs, ok = obj.PayloadHomomorphicHash()
+ if ok {
+ res = append(res, sysObjHdr{
+ k: acl.FilterObjectHomomorphicHash,
+ v: cs.String(),
+ })
+ }
+
+ attrs := obj.Attributes()
+ for i := range attrs {
+ res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface
+ }
+ }
+
+ return res
+}
diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go
new file mode 100644
index 000000000..d91a21c75
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/opts.go
@@ -0,0 +1,11 @@
+package v2
+
+import (
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func WithOID(v *oid.ID) Option {
+ return func(c *cfg) {
+ c.obj = v
+ }
+}
diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go
new file mode 100644
index 000000000..c1fdea9d8
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/xheader.go
@@ -0,0 +1,69 @@
+package v2
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+)
+
+type XHeaderSource interface {
+ GetXHeaders() []eaclSDK.Header
+}
+
+type requestXHeaderSource struct {
+ req Request
+}
+
+func NewRequestXHeaderSource(req Request) XHeaderSource {
+ return requestXHeaderSource{req: req}
+}
+
+type responseXHeaderSource struct {
+ resp Response
+
+ req Request
+}
+
+func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource {
+ return responseXHeaderSource{resp: resp, req: req}
+}
+
+func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header {
+ ln := 0
+
+ for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
+ ln += len(meta.GetXHeaders())
+ }
+
+ res := make([]eaclSDK.Header, 0, ln)
+ for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
+ x := meta.GetXHeaders()
+ for i := range x {
+ res = append(res, (xHeader)(x[i]))
+ }
+ }
+
+ return res
+}
+
+func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header {
+ ln := 0
+ xHdrs := make([][]session.XHeader, 0)
+
+ for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
+ x := meta.GetXHeaders()
+
+ ln += len(x)
+
+ xHdrs = append(xHdrs, x)
+ }
+
+ res := make([]eaclSDK.Header, 0, ln)
+
+ for i := range xHdrs {
+ for j := range xHdrs[i] {
+ res = append(res, xHeader(xHdrs[i][j]))
+ }
+ }
+
+ return res
+}
diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go
new file mode 100644
index 000000000..7fc20d618
--- /dev/null
+++ b/pkg/services/object/acl/v2/errors.go
@@ -0,0 +1,39 @@
+package v2
+
+import (
+ "fmt"
+
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+)
+
+const invalidRequestMessage = "malformed request"
+
+func malformedRequestError(reason string) error {
+ return fmt.Errorf("%s: %s", invalidRequestMessage, reason)
+}
+
+var (
+ errEmptyBody = malformedRequestError("empty body")
+ errEmptyVerificationHeader = malformedRequestError("empty verification header")
+ errEmptyBodySig = malformedRequestError("empty at body signature")
+ errInvalidSessionSig = malformedRequestError("invalid session token signature")
+ errInvalidSessionOwner = malformedRequestError("invalid session token owner")
+ errInvalidVerb = malformedRequestError("session token verb is invalid")
+)
+
+const accessDeniedACLReasonFmt = "access to operation %s is denied by basic ACL check"
+const accessDeniedEACLReasonFmt = "access to operation %s is denied by extended ACL check: %v"
+
+func basicACLErr(info RequestInfo) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedACLReasonFmt, info.operation))
+
+ return errAccessDenied
+}
+
+func eACLErr(info RequestInfo, err error) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedEACLReasonFmt, info.operation, err))
+
+ return errAccessDenied
+}
diff --git a/pkg/services/object/acl/v2/errors_test.go b/pkg/services/object/acl/v2/errors_test.go
new file mode 100644
index 000000000..2d2b7bc8d
--- /dev/null
+++ b/pkg/services/object/acl/v2/errors_test.go
@@ -0,0 +1,30 @@
+package v2
+
+import (
+ "errors"
+ "testing"
+
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBasicACLErr(t *testing.T) {
+ var reqInfo RequestInfo
+ err := basicACLErr(reqInfo)
+
+ var errAccessDenied *apistatus.ObjectAccessDenied
+
+ require.ErrorAs(t, err, &errAccessDenied,
+ "basicACLErr must be able to be casted to apistatus.ObjectAccessDenied")
+}
+
+func TestEACLErr(t *testing.T) {
+ var reqInfo RequestInfo
+ testErr := errors.New("test-eacl")
+ err := eACLErr(reqInfo, testErr)
+
+ var errAccessDenied *apistatus.ObjectAccessDenied
+
+ require.ErrorAs(t, err, &errAccessDenied,
+ "eACLErr must be able to be casted to apistatus.ObjectAccessDenied")
+}
diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go
new file mode 100644
index 000000000..15fcce884
--- /dev/null
+++ b/pkg/services/object/acl/v2/opts.go
@@ -0,0 +1,12 @@
+package v2
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+)
+
+// WithLogger returns option to set logger.
+func WithLogger(v *logger.Logger) Option {
+ return func(c *cfg) {
+ c.log = v
+ }
+}
diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go
new file mode 100644
index 000000000..675768969
--- /dev/null
+++ b/pkg/services/object/acl/v2/request.go
@@ -0,0 +1,145 @@
+package v2
+
+import (
+ "crypto/ecdsa"
+ "fmt"
+
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+// RequestInfo groups parsed version-independent (from SDK library)
+// request information and raw API request.
+type RequestInfo struct {
+ basicACL acl.Basic
+ requestRole acl.Role
+ operation acl.Op // put, get, head, etc.
+ cnrOwner user.ID // container owner
+
+ idCnr cid.ID
+
+ // optional for some request
+ // e.g. Put, Search
+ obj *oid.ID
+
+ senderKey []byte
+
+ bearer *bearer.Token // bearer token of request
+
+ srcRequest any
+}
+
+func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) {
+ r.basicACL = basicACL
+}
+
+func (r *RequestInfo) SetRequestRole(requestRole acl.Role) {
+ r.requestRole = requestRole
+}
+
+func (r *RequestInfo) SetSenderKey(senderKey []byte) {
+ r.senderKey = senderKey
+}
+
+// Request returns raw API request.
+func (r RequestInfo) Request() any {
+ return r.srcRequest
+}
+
+// ContainerOwner returns owner of the container.
+func (r RequestInfo) ContainerOwner() user.ID {
+ return r.cnrOwner
+}
+
+// ObjectID returns object ID.
+func (r RequestInfo) ObjectID() *oid.ID {
+ return r.obj
+}
+
+// ContainerID returns container ID.
+func (r RequestInfo) ContainerID() cid.ID {
+ return r.idCnr
+}
+
+// CleanBearer forces cleaning bearer token information.
+func (r *RequestInfo) CleanBearer() {
+ r.bearer = nil
+}
+
+// Bearer returns bearer token of the request.
+func (r RequestInfo) Bearer() *bearer.Token {
+ return r.bearer
+}
+
+// BasicACL returns basic ACL of the container.
+func (r RequestInfo) BasicACL() acl.Basic {
+ return r.basicACL
+}
+
+// SenderKey returns public key of the request's sender.
+func (r RequestInfo) SenderKey() []byte {
+ return r.senderKey
+}
+
+// Operation returns request's operation.
+func (r RequestInfo) Operation() acl.Op {
+ return r.operation
+}
+
+// RequestRole returns request sender's role.
+func (r RequestInfo) RequestRole() acl.Role {
+ return r.requestRole
+}
+
+// MetaWithToken groups session and bearer tokens,
+// verification header and raw API request.
+type MetaWithToken struct {
+ vheader *sessionV2.RequestVerificationHeader
+ token *sessionSDK.Object
+ bearer *bearer.Token
+ src any
+}
+
+// RequestOwner returns ownerID and its public key
+// according to internal meta information.
+func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) {
+ if r.vheader == nil {
+ return nil, nil, errEmptyVerificationHeader
+ }
+
+ if r.bearer != nil && r.bearer.Impersonate() {
+ return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes())
+ }
+
+ // if session token is presented, use it as truth source
+ if r.token != nil {
+ // verify signature of session token
+ return ownerFromToken(r.token)
+ }
+
+ // otherwise get original body signature
+ bodySignature := originalBodySignature(r.vheader)
+ if bodySignature == nil {
+ return nil, nil, errEmptyBodySig
+ }
+
+ return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
+}
+
+func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
+ key, err := unmarshalPublicKey(rawKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid signature key: %w", err)
+ }
+
+ var idSender user.ID
+ user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
+
+ return &idSender, key, nil
+}
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
new file mode 100644
index 000000000..8239403a7
--- /dev/null
+++ b/pkg/services/object/acl/v2/service.go
@@ -0,0 +1,677 @@
+package v2
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "go.uber.org/zap"
+)
+
+// Service checks basic ACL rules.
+type Service struct {
+ *cfg
+
+ c objectCore.SenderClassifier
+}
+
+type putStreamBasicChecker struct {
+ source *Service
+ next object.PutObjectStream
+}
+
+type getStreamBasicChecker struct {
+ checker ACLChecker
+
+ object.GetObjectStream
+
+ info RequestInfo
+}
+
+type rangeStreamBasicChecker struct {
+ checker ACLChecker
+
+ object.GetObjectRangeStream
+
+ info RequestInfo
+}
+
+type searchStreamBasicChecker struct {
+ checker ACLChecker
+
+ object.SearchStream
+
+ info RequestInfo
+}
+
+// Option represents Service constructor option.
+type Option func(*cfg)
+
+type cfg struct {
+ log *logger.Logger
+
+ containers container.Source
+
+ checker ACLChecker
+
+ irFetcher InnerRingFetcher
+
+ nm netmap.Source
+
+ next object.ServiceServer
+}
+
+// New is a constructor for object ACL checking service.
+func New(next object.ServiceServer,
+ nm netmap.Source,
+ irf InnerRingFetcher,
+ acl ACLChecker,
+ cs container.Source,
+ opts ...Option) Service {
+ cfg := &cfg{
+ log: &logger.Logger{Logger: zap.L()},
+ next: next,
+ nm: nm,
+ irFetcher: irf,
+ checker: acl,
+ containers: cs,
+ }
+
+ for i := range opts {
+ opts[i](cfg)
+ }
+
+ return Service{
+ cfg: cfg,
+ c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log),
+ }
+}
+
+// Get implements ServiceServer interface, makes ACL checks and calls
+// next Get method in the ServiceServer pipeline.
+func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectGet)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return eACLErr(reqInfo, err)
+ }
+
+ return b.next.Get(request, &getStreamBasicChecker{
+ GetObjectStream: stream,
+ info: reqInfo,
+ checker: b.checker,
+ })
+}
+
+func (b Service) Put() (object.PutObjectStream, error) {
+ streamer, err := b.next.Put()
+
+ return putStreamBasicChecker{
+ source: &b,
+ next: streamer,
+ }, err
+}
+
+func (b Service) Head(
+ ctx context.Context,
+ request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return nil, err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHead)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return nil, basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+
+ resp, err := b.next.Head(ctx, request)
+ if err == nil {
+ if err = b.checker.CheckEACL(resp, reqInfo); err != nil {
+ err = eACLErr(reqInfo, err)
+ }
+ }
+
+ return resp, err
+}
+
+func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error {
+ id, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, id, nil)
+ if err != nil {
+ return err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, id, acl.OpObjectSearch)
+ if err != nil {
+ return err
+ }
+
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return eACLErr(reqInfo, err)
+ }
+
+ return b.next.Search(request, &searchStreamBasicChecker{
+ checker: b.checker,
+ SearchStream: stream,
+ info: reqInfo,
+ })
+}
+
+func (b Service) Delete(
+ ctx context.Context,
+ request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return nil, err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectDelete)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return nil, basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+
+ return b.next.Delete(ctx, request)
+}
+
+func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectRange)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return eACLErr(reqInfo, err)
+ }
+
+ return b.next.GetRange(request, &rangeStreamBasicChecker{
+ checker: b.checker,
+ GetObjectRangeStream: stream,
+ info: reqInfo,
+ })
+}
+
+func (b Service) GetRangeHash(
+ ctx context.Context,
+ request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := getObjectIDFromRequestBody(request.GetBody())
+ if err != nil {
+ return nil, err
+ }
+
+ sTok, err := originalSessionToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ if sTok != nil {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHash)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if !b.checker.CheckBasicACL(reqInfo) {
+ return nil, basicACLErr(reqInfo)
+ } else if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+
+ return b.next.GetRangeHash(ctx, request)
+}
+
+func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return nil, err
+ }
+
+ idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID()
+ if idV2 == nil {
+ return nil, errors.New("missing object owner")
+ }
+
+ var idOwner user.ID
+
+ err = idOwner.ReadFromV2(*idV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid object owner: %w", err)
+ }
+
+ obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID())
+ if err != nil {
+ return nil, err
+ }
+
+ var sTok *sessionSDK.Object
+ sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+ if err != nil {
+ return nil, err
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return nil, err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectPut)
+ if err != nil {
+ return nil, err
+ }
+
+ reqInfo.obj = obj
+
+ if !b.checker.CheckBasicACL(reqInfo) || !b.checker.StickyBitCheck(reqInfo, idOwner) {
+ return nil, basicACLErr(reqInfo)
+ }
+
+ if err := b.checker.CheckEACL(request, reqInfo); err != nil {
+ return nil, eACLErr(reqInfo, err)
+ }
+
+ return b.next.PutSingle(ctx, request)
+}
+
+func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
+ body := request.GetBody()
+ if body == nil {
+ return errEmptyBody
+ }
+
+ part := body.GetObjectPart()
+ if part, ok := part.(*objectV2.PutObjectPartInit); ok {
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ idV2 := part.GetHeader().GetOwnerID()
+ if idV2 == nil {
+ return errors.New("missing object owner")
+ }
+
+ var idOwner user.ID
+
+ err = idOwner.ReadFromV2(*idV2)
+ if err != nil {
+ return fmt.Errorf("invalid object owner: %w", err)
+ }
+
+ objV2 := part.GetObjectID()
+ var obj *oid.ID
+
+ if objV2 != nil {
+ obj = new(oid.ID)
+
+ err = obj.ReadFromV2(*objV2)
+ if err != nil {
+ return err
+ }
+ }
+
+ var sTok *sessionSDK.Object
+ sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+ if err != nil {
+ return err
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := p.source.findRequestInfo(req, cnr, acl.OpObjectPut)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ if !p.source.checker.CheckBasicACL(reqInfo) || !p.source.checker.StickyBitCheck(reqInfo, idOwner) {
+ return basicACLErr(reqInfo)
+ } else if err := p.source.checker.CheckEACL(request, reqInfo); err != nil {
+ return eACLErr(reqInfo, err)
+ }
+ }
+
+ return p.next.Send(ctx, request)
+}
+
+func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
+ var sTok *sessionSDK.Object
+
+ if tokV2 != nil {
+ sTok = new(sessionSDK.Object)
+
+ err := sTok.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
+ // if session relates to object's removal, we don't check
+ // relation of the tombstone to the session here since user
+ // can't predict tomb's ID.
+ err = assertSessionRelation(*sTok, cnr, nil)
+ } else {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return sTok, nil
+}
+
+func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
+func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
+ if _, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
+ if err := g.checker.CheckEACL(resp, g.info); err != nil {
+ return eACLErr(g.info, err)
+ }
+ }
+
+ return g.GetObjectStream.Send(resp)
+}
+
+func (g *rangeStreamBasicChecker) Send(resp *objectV2.GetRangeResponse) error {
+ if err := g.checker.CheckEACL(resp, g.info); err != nil {
+ return eACLErr(g.info, err)
+ }
+
+ return g.GetObjectRangeStream.Send(resp)
+}
+
+func (g *searchStreamBasicChecker) Send(resp *objectV2.SearchResponse) error {
+ if err := g.checker.CheckEACL(resp, g.info); err != nil {
+ return eACLErr(g.info, err)
+ }
+
+ return g.SearchStream.Send(resp)
+}
+
+func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
+ cnr, err := b.containers.Get(idCnr) // fetch actual container
+ if err != nil {
+ return info, err
+ }
+
+ if req.token != nil {
+ currentEpoch, err := b.nm.Epoch()
+ if err != nil {
+ return info, errors.New("can't fetch current epoch")
+ }
+ if req.token.ExpiredAt(currentEpoch) {
+ return info, new(apistatus.SessionTokenExpired)
+ }
+ if req.token.InvalidAt(currentEpoch) {
+ return info, fmt.Errorf("%s: token is invalid at %d epoch)",
+ invalidRequestMessage, currentEpoch)
+ }
+
+ if !assertVerb(*req.token, op) {
+ return info, errInvalidVerb
+ }
+ }
+
+ // find request role and key
+ ownerID, ownerKey, err := req.RequestOwner()
+ if err != nil {
+ return info, err
+ }
+ res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
+ if err != nil {
+ return info, err
+ }
+
+ info.basicACL = cnr.Value.BasicACL()
+ info.requestRole = res.Role
+ info.operation = op
+ info.cnrOwner = cnr.Value.Owner()
+ info.idCnr = idCnr
+
+ // it is assumed that at the moment the key will be valid,
+ // otherwise the request would not pass validation
+ info.senderKey = res.Key
+
+ // add bearer token if it is present in request
+ info.bearer = req.bearer
+
+ info.srcRequest = req.src
+
+ return info, nil
+}
diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go
new file mode 100644
index 000000000..061cd26b6
--- /dev/null
+++ b/pkg/services/object/acl/v2/types.go
@@ -0,0 +1,28 @@
+package v2
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+// ACLChecker is an interface that must provide
+// ACL related checks.
+type ACLChecker interface {
+ // CheckBasicACL must return true only if request
+ // passes basic ACL validation.
+ CheckBasicACL(RequestInfo) bool
+ // CheckEACL must return non-nil error if request
+ // doesn't pass extended ACL validation.
+ CheckEACL(any, RequestInfo) error
+ // StickyBitCheck must return true only if sticky bit
+ // is disabled or enabled but request contains correct
+ // owner field.
+ StickyBitCheck(RequestInfo, user.ID) bool
+}
+
+// InnerRingFetcher is an interface that must provide
+// Inner Ring information.
+type InnerRingFetcher interface {
+ // InnerRingKeys must return list of public keys of
+ // the actual inner ring.
+ InnerRingKeys() ([][]byte, error)
+}
diff --git a/pkg/services/object/ape/util.go b/pkg/services/object/acl/v2/util.go
similarity index 55%
rename from pkg/services/object/ape/util.go
rename to pkg/services/object/acl/v2/util.go
index 5cd2caa50..feda6a3cf 100644
--- a/pkg/services/object/ape/util.go
+++ b/pkg/services/object/acl/v2/util.go
@@ -1,4 +1,4 @@
-package ape
+package v2
import (
"crypto/ecdsa"
@@ -6,34 +6,55 @@ import (
"errors"
"fmt"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
-func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
- if cidV2 != nil {
- if err = cnrID.ReadFromV2(*cidV2); err != nil {
- return
+var errMissingContainerID = errors.New("missing container ID")
+
+func getContainerIDFromRequest(req any) (cid.ID, error) {
+ var idV2 *refsV2.ContainerID
+ var id cid.ID
+
+ switch v := req.(type) {
+ case *objectV2.GetRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.PutRequest:
+ part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit)
+ if !ok {
+ return cid.ID{}, errors.New("can't get container ID in chunk")
}
- } else {
- err = errMissingContainerID
- return
+
+ idV2 = part.GetHeader().GetContainerID()
+ case *objectV2.HeadRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.SearchRequest:
+ idV2 = v.GetBody().GetContainerID()
+ case *objectV2.DeleteRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.GetRangeRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.GetRangeHashRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
+ case *objectV2.PutSingleRequest:
+ idV2 = v.GetBody().GetObject().GetHeader().GetContainerID()
+ default:
+ return cid.ID{}, errors.New("unknown request type")
}
- if objV2 != nil {
- objID = new(oid.ID)
- if err = objID.ReadFromV2(*objV2); err != nil {
- return
- }
+ if idV2 == nil {
+ return cid.ID{}, errMissingContainerID
}
- return
+
+ return id, id.ReadFromV2(*idV2)
}
// originalBearerToken goes down to original request meta header and fetches
@@ -52,6 +73,50 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er
return &tok, tok.ReadFromV2(*tokV2)
}
+// originalSessionToken goes down to original request meta header and fetches
+// session token from there.
+func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) {
+ for header.GetOrigin() != nil {
+ header = header.GetOrigin()
+ }
+
+ tokV2 := header.GetSessionToken()
+ if tokV2 == nil {
+ return nil, nil
+ }
+
+ var tok sessionSDK.Object
+
+ err := tok.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ return &tok, nil
+}
+
+// getObjectIDFromRequestBody decodes oid.ID from the common interface of the
+// object reference's holders. Returns an error if object ID is missing in the request.
+func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) {
+ idV2 := body.GetAddress().GetObjectID()
+ return getObjectIDFromRefObjectID(idV2)
+}
+
+func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) {
+ if idV2 == nil {
+ return nil, errors.New("missing object ID")
+ }
+
+ var id oid.ID
+
+ err := id.ReadFromV2(*idV2)
+ if err != nil {
+ return nil, err
+ }
+
+ return &id, nil
+}
+
func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) {
// 1. First check signature of session token.
if !token.VerifySignature() {
@@ -105,33 +170,30 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
-// assertVerb checks that token verb corresponds to the method.
-func assertVerb(tok sessionSDK.Object, method string) bool {
- switch method {
- case nativeschema.MethodPutObject:
- return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch)
- case nativeschema.MethodDeleteObject:
+// assertVerb checks that token verb corresponds to op.
+func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
+ switch op {
+ case acl.OpObjectPut:
+ return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete)
+ case acl.OpObjectDelete:
return tok.AssertVerb(sessionSDK.VerbObjectDelete)
- case nativeschema.MethodGetObject:
+ case acl.OpObjectGet:
return tok.AssertVerb(sessionSDK.VerbObjectGet)
- case nativeschema.MethodHeadObject:
+ case acl.OpObjectHead:
return tok.AssertVerb(
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectGet,
sessionSDK.VerbObjectDelete,
sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- sessionSDK.VerbObjectPatch,
- )
- case nativeschema.MethodSearchObject:
+ sessionSDK.VerbObjectRangeHash)
+ case acl.OpObjectSearch:
return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete)
- case nativeschema.MethodRangeObject:
- return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch)
- case nativeschema.MethodHashObject:
+ case acl.OpObjectRange:
+ return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash)
+ case acl.OpObjectHash:
return tok.AssertVerb(sessionSDK.VerbObjectRangeHash)
- case nativeschema.MethodPatchObject:
- return tok.AssertVerb(sessionSDK.VerbObjectPatch)
}
+
return false
}
@@ -155,15 +217,3 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error
return nil
}
-
-func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
- key, err := unmarshalPublicKey(rawKey)
- if err != nil {
- return nil, nil, fmt.Errorf("invalid signature key: %w", err)
- }
-
- var idSender user.ID
- user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
-
- return &idSender, key, nil
-}
diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go
new file mode 100644
index 000000000..394feef4e
--- /dev/null
+++ b/pkg/services/object/acl/v2/util_test.go
@@ -0,0 +1,136 @@
+package v2
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
+ aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+ sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOriginalTokens(t *testing.T) {
+ sToken := sessiontest.ObjectSigned()
+ bToken := bearertest.Token()
+
+ pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, bToken.Sign(*pk))
+
+ var bTokenV2 acl.BearerToken
+ bToken.WriteToV2(&bTokenV2)
+ // This line is needed because SDK uses some custom format for
+ // reserved filters, so `cid.ID` is not converted to string immediately.
+ require.NoError(t, bToken.ReadFromV2(bTokenV2))
+
+ var sTokenV2 session.Token
+ sToken.WriteToV2(&sTokenV2)
+
+ for i := 0; i < 10; i++ {
+ metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
+ res, err := originalSessionToken(metaHeaders)
+ require.NoError(t, err)
+ require.Equal(t, sToken, res, i)
+
+ bTok, err := originalBearerToken(metaHeaders)
+ require.NoError(t, err)
+ require.Equal(t, &bToken, bTok, i)
+ }
+}
+
+func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader {
+ metaHeader := new(session.RequestMetaHeader)
+ metaHeader.SetBearerToken(b)
+ metaHeader.SetSessionToken(s)
+
+ for i := uint32(0); i < depth; i++ {
+ link := metaHeader
+ metaHeader = new(session.RequestMetaHeader)
+ metaHeader.SetOrigin(link)
+ }
+
+ return metaHeader
+}
+
+func TestIsVerbCompatible(t *testing.T) {
+ // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28
+ table := map[aclsdk.Op][]sessionSDK.ObjectVerb{
+ aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete},
+ aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete},
+ aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet},
+ aclsdk.OpObjectHead: {
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ },
+ aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash},
+ aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash},
+ aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
+ }
+
+ verbs := []sessionSDK.ObjectVerb{
+ sessionSDK.VerbObjectPut,
+ sessionSDK.VerbObjectDelete,
+ sessionSDK.VerbObjectHead,
+ sessionSDK.VerbObjectRange,
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectGet,
+ sessionSDK.VerbObjectSearch,
+ }
+
+ var tok sessionSDK.Object
+
+ for op, list := range table {
+ for _, verb := range verbs {
+ var contains bool
+ for _, v := range list {
+ if v == verb {
+ contains = true
+ break
+ }
+ }
+
+ tok.ForVerb(verb)
+
+ require.Equal(t, contains, assertVerb(tok, op),
+ "%v in token, %s executing", verb, op)
+ }
+ }
+}
+
+func TestAssertSessionRelation(t *testing.T) {
+ var tok sessionSDK.Object
+ cnr := cidtest.ID()
+ cnrOther := cidtest.ID()
+ obj := oidtest.ID()
+ objOther := oidtest.ID()
+
+ // make sure ids differ, otherwise test won't work correctly
+ require.False(t, cnrOther.Equals(cnr))
+ require.False(t, objOther.Equals(obj))
+
+ // bind session to the container (required)
+ tok.BindContainer(cnr)
+
+ // test container-global session
+ require.NoError(t, assertSessionRelation(tok, cnr, nil))
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnrOther, nil))
+ require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
+
+ // limit the session to the particular object
+ tok.LimitByObjects(obj)
+
+ // test fixed object session (here obj arg must be non-nil everywhere)
+ require.NoError(t, assertSessionRelation(tok, cnr, &obj))
+ require.Error(t, assertSessionRelation(tok, cnr, &objOther))
+}
diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go
deleted file mode 100644
index b96757def..000000000
--- a/pkg/services/object/ape/checker.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package ape
-
-import (
- "context"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-type checkerImpl struct {
- checkerCore checkercore.CheckCore
- frostFSIDClient frostfsidcore.SubjectProvider
- headerProvider HeaderProvider
- nm netmap.Source
- cnrSource container.Source
- nodePK []byte
-}
-
-func NewChecker(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader, headerProvider HeaderProvider, frostFSIDClient frostfsidcore.SubjectProvider, nm netmap.Source, st netmap.State, cnrSource container.Source, nodePK []byte) Checker {
- return &checkerImpl{
- checkerCore: checkercore.New(localOverrideStorage, morphChainStorage, frostFSIDClient, st),
- frostFSIDClient: frostFSIDClient,
- headerProvider: headerProvider,
- nm: nm,
- cnrSource: cnrSource,
- nodePK: nodePK,
- }
-}
-
-type Prm struct {
- Namespace string
-
- Container cid.ID
-
- // Object ID is omitted for some methods.
- Object *oid.ID
-
- // If Header is set, then object attributes and properties will be parsed from
- // a request/response's header.
- Header *objectV2.Header
-
- // Method must be represented only as a constant represented in native schema.
- Method string
-
- // Role must be representedonly as a constant represented in native schema.
- Role string
-
- // An encoded sender's public key string.
- SenderKey string
-
- // An encoded container's owner user ID.
- ContainerOwner user.ID
-
- // The request's bearer token. It is used in order to check APE overrides with the token.
- BearerToken *bearer.Token
-
- // XHeaders from the request.
- XHeaders []session.XHeader
-}
-
-var errMissingOID = errors.New("object ID is not set")
-
-// CheckAPE prepares an APE-request and checks if it is permitted by policies.
-func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
- // APE check is ignored for some inter-node requests.
- switch prm.Role {
- case nativeschema.PropertyValueContainerRoleContainer:
- return nil
- case nativeschema.PropertyValueContainerRoleIR:
- switch prm.Method {
- case nativeschema.MethodGetObject,
- nativeschema.MethodHeadObject,
- nativeschema.MethodSearchObject,
- nativeschema.MethodRangeObject,
- nativeschema.MethodHashObject:
- return nil
- default:
- }
- }
-
- r, err := c.newAPERequest(ctx, prm)
- if err != nil {
- return fmt.Errorf("failed to create ape request: %w", err)
- }
- pub, err := keys.NewPublicKeyFromString(prm.SenderKey)
- if err != nil {
- return err
- }
-
- return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{
- Request: r,
- PublicKey: pub,
- Namespace: prm.Namespace,
- Container: prm.Container,
- ContainerOwner: prm.ContainerOwner,
- BearerToken: prm.BearerToken,
- })
-}
diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go
deleted file mode 100644
index 97eb2b2d7..000000000
--- a/pkg/services/object/ape/checker_test.go
+++ /dev/null
@@ -1,782 +0,0 @@
-package ape
-
-import (
- "context"
- "crypto/ecdsa"
- "encoding/hex"
- "errors"
- "fmt"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
- apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
- commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/stretchr/testify/require"
-)
-
-type headerProviderMock struct {
- m map[oid.Address]*objectSDK.Object
-}
-
-var _ HeaderProvider = (*headerProviderMock)(nil)
-
-func (h *headerProviderMock) addHeader(c cid.ID, o oid.ID, header *objectSDK.Object) {
- var addr oid.Address
- addr.SetContainer(c)
- addr.SetObject(o)
- h.m[addr] = header
-}
-
-func (h *headerProviderMock) GetHeader(_ context.Context, c cid.ID, o oid.ID, _ bool) (*objectSDK.Object, error) {
- var addr oid.Address
- addr.SetContainer(c)
- addr.SetObject(o)
- obj, ok := h.m[addr]
- if !ok {
- return nil, fmt.Errorf("address not found")
- }
- return obj, nil
-}
-
-func newHeaderProviderMock() *headerProviderMock {
- return &headerProviderMock{
- m: make(map[oid.Address]*objectSDK.Object),
- }
-}
-
-func newContainerIDSDK(t *testing.T, encodedCID string) cid.ID {
- var cnr cid.ID
- require.NoError(t, cnr.DecodeString(encodedCID))
- return cnr
-}
-
-func newObjectIDSDK(t *testing.T, encodedOID *string) *oid.ID {
- if encodedOID == nil {
- return nil
- }
- obj := new(oid.ID)
- require.NoError(t, obj.DecodeString(*encodedOID))
- return obj
-}
-
-type headerObjectSDKParams struct {
- majorVersion, minorVersion uint32
- owner user.ID
- epoch uint64
- payloadSize uint64
- typ objectSDK.Type
- payloadChecksum checksum.Checksum
- payloadHomomorphicHash checksum.Checksum
- attributes []struct {
- key string
- val string
- }
-}
-
-func stringPtr(s string) *string {
- return &s
-}
-
-func newHeaderObjectSDK(cnr cid.ID, oid *oid.ID, headerObjSDK *headerObjectSDKParams) *objectSDK.Object {
- objSDK := objectSDK.New()
- objSDK.SetContainerID(cnr)
- if oid != nil {
- objSDK.SetID(*oid)
- }
- if headerObjSDK == nil {
- return objSDK
- }
- ver := new(version.Version)
- ver.SetMajor(headerObjSDK.majorVersion)
- ver.SetMinor(headerObjSDK.minorVersion)
- objSDK.SetVersion(ver)
- objSDK.SetCreationEpoch(headerObjSDK.epoch)
- objSDK.SetOwnerID(headerObjSDK.owner)
- objSDK.SetPayloadSize(headerObjSDK.payloadSize)
- objSDK.SetType(headerObjSDK.typ)
- objSDK.SetPayloadChecksum(headerObjSDK.payloadChecksum)
- objSDK.SetPayloadHomomorphicHash(headerObjSDK.payloadHomomorphicHash)
-
- var attrs []objectSDK.Attribute
- for _, attr := range headerObjSDK.attributes {
- attrSDK := objectSDK.NewAttribute()
- attrSDK.SetKey(attr.key)
- attrSDK.SetValue(attr.val)
- attrs = append(attrs, *attrSDK)
- }
- objSDK.SetAttributes(attrs...)
-
- return objSDK
-}
-
-type testHeader struct {
- headerObjSDK *headerObjectSDKParams
-
- // If fromHeaderProvider is set, then running test should
- // consider that a header is recieved from a header provider.
- fromHeaderProvider bool
-
- // If fromHeaderProvider is set, then running test should
- // consider that a header is recieved from a message header.
- fromRequestResponseHeader bool
-}
-
-var (
- methodsRequiredOID = []string{
- nativeschema.MethodGetObject,
- nativeschema.MethodHeadObject,
- nativeschema.MethodRangeObject,
- nativeschema.MethodHashObject,
- nativeschema.MethodDeleteObject,
- }
-
- methodsOptionalOID = []string{
- nativeschema.MethodSearchObject, nativeschema.MethodPutObject,
- }
-
- namespace = "test_namespace"
-
- containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy"
-
- objectID = "BzQw5HH3feoxFDD5tCT87Y1726qzgLfxEE7wgtoRzB3R"
-
- groupID = "1"
-
- role = "Container"
-
- senderPrivateKey, _ = keys.NewPrivateKey()
-
- senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes())
-)
-
-type frostfsIDProviderMock struct {
- subjects map[util.Uint160]*client.Subject
- subjectsExtended map[util.Uint160]*client.SubjectExtended
-}
-
-var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil)
-
-func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock {
- return &frostfsIDProviderMock{
- subjects: map[util.Uint160]*client.Subject{
- scriptHashFromSenderKey(t, senderKey): {
- Namespace: "testnamespace",
- Name: "test",
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- },
- },
- subjectsExtended: map[util.Uint160]*client.SubjectExtended{
- scriptHashFromSenderKey(t, senderKey): {
- Namespace: "testnamespace",
- Name: "test",
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 1,
- Name: "test",
- Namespace: "testnamespace",
- KV: map[string]string{
- "attr1": "value1",
- "attr2": "value2",
- },
- },
- },
- },
- },
- }
-}
-
-func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
- pk, err := keys.NewPublicKeyFromString(senderKey)
- require.NoError(t, err)
- return pk.GetScriptHash()
-}
-
-func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
- v, ok := f.subjects[key]
- if !ok {
- return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
- }
- return v, nil
-}
-
-func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
- v, ok := f.subjectsExtended[key]
- if !ok {
- return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
- }
- return v, nil
-}
-
-var apeCheckTestCases = []struct {
- name string
- container string
- object *string
- methods []string
- header testHeader
- xHeaders []session.XHeader
- containerRules []chain.Rule
- groupidRules []chain.Rule
- expectAPEErr bool
-}{
- {
- name: "oid required requests are allowed",
- container: containerID,
- object: stringPtr(objectID),
- methods: methodsRequiredOID,
- containerRules: []chain.Rule{
- {
- Status: chain.Allow,
- Actions: chain.Actions{Names: methodsRequiredOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)},
- },
- },
- },
- },
- {
- name: "oid optional requests are allowed",
- container: containerID,
- methods: methodsOptionalOID,
- containerRules: []chain.Rule{
- {
- Status: chain.Allow,
- Actions: chain.Actions{Names: methodsOptionalOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- },
- },
- },
- {
- name: "oid required requests are denied",
- container: containerID,
- object: stringPtr(objectID),
- methods: methodsRequiredOID,
- containerRules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{Names: methodsRequiredOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)},
- },
- },
- },
- expectAPEErr: true,
- },
- {
- name: "oid required requests are denied by an attribute",
- container: containerID,
- object: stringPtr(objectID),
- methods: methodsRequiredOID,
- header: testHeader{
- headerObjSDK: &headerObjectSDKParams{
- attributes: []struct {
- key string
- val string
- }{
- {
- key: "attr1",
- val: "attribute_value",
- },
- },
- },
- fromHeaderProvider: true,
- },
- containerRules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{Names: methodsRequiredOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)},
- },
- Any: true,
- Condition: []chain.Condition{
- {
- Op: chain.CondStringLike,
- Kind: chain.KindResource,
- Key: "attr1",
- Value: "attribute*",
- },
- },
- },
- },
- expectAPEErr: true,
- },
- {
- name: "oid required requests are denied by sender",
- container: containerID,
- object: stringPtr(objectID),
- methods: methodsRequiredOID,
- header: testHeader{
- headerObjSDK: &headerObjectSDKParams{
- attributes: []struct {
- key string
- val string
- }{
- {
- key: "attr1",
- val: "attribute_value",
- },
- },
- },
- fromHeaderProvider: true,
- },
- containerRules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{Names: methodsRequiredOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)},
- },
- Any: true,
- Condition: []chain.Condition{
- {
- Op: chain.CondStringLike,
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorPublicKey,
- Value: senderKey,
- },
- },
- },
- },
- expectAPEErr: true,
- },
- {
- name: "oid required requests are denied by xheader",
- container: containerID,
- object: stringPtr(objectID),
- methods: methodsRequiredOID,
- header: testHeader{
- headerObjSDK: &headerObjectSDKParams{
- attributes: []struct {
- key string
- val string
- }{
- {
- key: "attr1",
- val: "attribute_value",
- },
- },
- },
- fromHeaderProvider: true,
- },
- xHeaders: []session.XHeader{
- func() (xhead session.XHeader) {
- xhead.SetKey("X-Test-ID")
- xhead.SetValue("aezakmi")
- return
- }(),
- },
- containerRules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{Names: methodsRequiredOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, containerID, objectID)},
- },
- Any: true,
- Condition: []chain.Condition{
- {
- Op: chain.CondStringLike,
- Kind: chain.KindRequest,
- Key: fmt.Sprintf(commonschema.PropertyKeyFrostFSXHeader, "X-Test-ID"),
- Value: "aezakmi",
- },
- },
- },
- },
- expectAPEErr: true,
- },
- {
- name: "optional oid requests reached quota limit by an attribute",
- container: containerID,
- methods: methodsOptionalOID,
- header: testHeader{
- headerObjSDK: &headerObjectSDKParams{
- payloadSize: 1000,
- },
- fromRequestResponseHeader: true,
- },
- containerRules: []chain.Rule{
- {
- Status: chain.QuotaLimitReached,
- Actions: chain.Actions{Names: methodsOptionalOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- Any: true,
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindResource,
- Key: nativeschema.PropertyKeyObjectPayloadLength,
- Value: "1000",
- },
- },
- },
- },
- expectAPEErr: true,
- },
- {
- name: "optional oid requests reached quota limit by group-id",
- container: containerID,
- methods: methodsOptionalOID,
- header: testHeader{
- headerObjSDK: &headerObjectSDKParams{
- payloadSize: 1000,
- },
- fromRequestResponseHeader: true,
- },
- groupidRules: []chain.Rule{
- {
- Status: chain.QuotaLimitReached,
- Actions: chain.Actions{Names: methodsOptionalOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- Any: true,
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindRequest,
- Key: commonschema.PropertyKeyFrostFSIDGroupID,
- Value: groupID,
- },
- },
- },
- },
- expectAPEErr: true,
- },
-}
-
-type stMock struct{}
-
-func (m *stMock) CurrentEpoch() uint64 {
- return 8
-}
-
-func TestAPECheck_BearerTokenOverrides(t *testing.T) {
- for _, test := range apeCheckTestCases {
- t.Run(test.name, func(t *testing.T) {
- chain := chain.Chain{
- Rules: test.containerRules,
- MatchType: chain.MatchTypeFirstMatch,
- }
- chainSDK := apeSDK.Chain{
- Raw: chain.Bytes(),
- }
- bt := new(bearer.Token)
- bt.SetIat(1)
- bt.SetExp(10)
- bt.SetAPEOverride(bearer.APEOverride{
- Target: apeSDK.ChainTarget{
- TargetType: apeSDK.TargetTypeContainer,
- Name: test.container,
- },
- Chains: []apeSDK.Chain{chainSDK},
- })
- bt.Sign(senderPrivateKey.PrivateKey)
- var cnrOwner user.ID
- user.IDFromKey(&cnrOwner, (ecdsa.PublicKey)(*senderPrivateKey.PublicKey()))
-
- for _, method := range test.methods {
- t.Run(method, func(t *testing.T) {
- headerProvider := newHeaderProviderMock()
- frostfsidProvider := newFrostfsIDProviderMock(t)
-
- cnr := newContainerIDSDK(t, test.container)
- obj := newObjectIDSDK(t, test.object)
-
- ls := inmemory.NewInmemoryLocalStorage()
- ms := inmemory.NewInmemoryMorphRuleChainStorage()
-
- checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nil, &stMock{}, nil, nil)
-
- prm := Prm{
- Method: method,
- Container: cnr,
- Object: obj,
- Role: role,
- ContainerOwner: cnrOwner,
- SenderKey: senderKey,
- BearerToken: bt,
- }
-
- var headerObjSDK *objectSDK.Object
- if test.header.headerObjSDK != nil {
- headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK)
- if test.header.fromHeaderProvider {
- require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider")
- headerProvider.addHeader(cnr, *obj, headerObjSDK)
- } else if test.header.fromRequestResponseHeader {
- prm.Header = headerObjSDK.ToV2().GetHeader()
- }
- }
-
- err := checker.CheckAPE(context.Background(), prm)
- if test.expectAPEErr {
- require.Error(t, err)
- } else {
- require.NoError(t, err)
- }
- })
- }
- })
- }
-}
-
-func TestAPECheck(t *testing.T) {
- for _, test := range apeCheckTestCases {
- t.Run(test.name, func(t *testing.T) {
- for _, method := range test.methods {
- t.Run(method, func(t *testing.T) {
- headerProvider := newHeaderProviderMock()
- frostfsidProvider := newFrostfsIDProviderMock(t)
-
- cnr := newContainerIDSDK(t, test.container)
- obj := newObjectIDSDK(t, test.object)
-
- ls := inmemory.NewInmemoryLocalStorage()
- ms := inmemory.NewInmemoryMorphRuleChainStorage()
-
- if len(test.containerRules) > 0 {
- ls.AddOverride(chain.Ingress, policyengine.ContainerTarget(test.container), &chain.Chain{
- Rules: test.containerRules,
- MatchType: chain.MatchTypeFirstMatch,
- })
- }
-
- if len(test.groupidRules) > 0 {
- ls.AddOverride(chain.Ingress, policyengine.GroupTarget(":"+groupID), &chain.Chain{
- Rules: test.groupidRules,
- MatchType: chain.MatchTypeFirstMatch,
- })
- }
-
- checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nil, &stMock{}, nil, nil)
-
- prm := Prm{
- Method: method,
- Container: cnr,
- Object: obj,
- Role: role,
- SenderKey: senderKey,
- }
-
- var headerObjSDK *objectSDK.Object
- if test.header.headerObjSDK != nil {
- headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK)
- if test.header.fromHeaderProvider {
- require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider")
- headerProvider.addHeader(cnr, *obj, headerObjSDK)
- } else if test.header.fromRequestResponseHeader {
- prm.Header = headerObjSDK.ToV2().GetHeader()
- }
- }
-
- err := checker.CheckAPE(context.Background(), prm)
- if test.expectAPEErr {
- require.Error(t, err)
- } else {
- require.NoError(t, err)
- }
- })
- }
- })
- }
-}
-
-type netmapStub struct {
- netmaps map[uint64]*netmapSDK.NetMap
- currentEpoch uint64
-}
-
-func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
- if diff >= s.currentEpoch {
- return nil, errors.New("invalid diff")
- }
- return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
-}
-
-func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
- if nm, found := s.netmaps[epoch]; found {
- return nm, nil
- }
- return nil, errors.New("netmap not found")
-}
-
-func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) {
- return s.currentEpoch, nil
-}
-
-type testContainerSource struct {
- containers map[cid.ID]*container.Container
-}
-
-func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
- if cnr, found := s.containers[cnrID]; found {
- return cnr, nil
- }
- return nil, fmt.Errorf("container not found")
-}
-
-func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
- return nil, nil
-}
-
-func TestGetECChunk(t *testing.T) {
- headerProvider := newHeaderProviderMock()
- frostfsidProvider := newFrostfsIDProviderMock(t)
-
- cnr := newContainerIDSDK(t, containerID)
- obj := newObjectIDSDK(t, &objectID)
-
- ls := inmemory.NewInmemoryLocalStorage()
- ms := inmemory.NewInmemoryMorphRuleChainStorage()
-
- ls.AddOverride(chain.Ingress, policyengine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{Names: methodsRequiredOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindResource,
- Key: "attr1",
- Value: "value",
- },
- },
- },
- {
- Status: chain.Allow,
- Actions: chain.Actions{Names: methodsRequiredOID},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- },
- },
- })
-
- node1Key, err := keys.NewPrivateKey()
- require.NoError(t, err)
- node1 := netmapSDK.NodeInfo{}
- node1.SetPublicKey(node1Key.PublicKey().Bytes())
- node2Key, err := keys.NewPrivateKey()
- require.NoError(t, err)
- node2 := netmapSDK.NodeInfo{}
- node2.SetPublicKey(node1Key.PublicKey().Bytes())
- netmap := &netmapSDK.NetMap{}
- netmap.SetEpoch(100)
- netmap.SetNodes([]netmapSDK.NodeInfo{node1, node2})
-
- nm := &netmapStub{
- currentEpoch: 100,
- netmaps: map[uint64]*netmapSDK.NetMap{
- 99: netmap,
- 100: netmap,
- },
- }
-
- cont := containerSDK.Container{}
- cont.Init()
- pp := netmapSDK.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("EC 1.1"))
- cont.SetPlacementPolicy(pp)
- cs := &testContainerSource{
- containers: map[cid.ID]*container.Container{
- cnr: {
- Value: cont,
- },
- },
- }
-
- checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, cs, node1Key.PublicKey().Bytes())
-
- ecParentID := oidtest.ID()
- chunkHeader := newHeaderObjectSDK(cnr, obj, nil).ToV2().GetHeader()
- ecHeader := object.ECHeader{
- Index: 1,
- Total: 2,
- Parent: &refs.ObjectID{},
- }
- chunkHeader.SetEC(&ecHeader)
- ecParentID.WriteToV2(ecHeader.Parent)
-
- parentHeader := newHeaderObjectSDK(cnr, &ecParentID, &headerObjectSDKParams{
- attributes: []struct {
- key string
- val string
- }{
- {
- key: "attr1",
- val: "value",
- },
- },
- })
- headerProvider.addHeader(cnr, ecParentID, parentHeader)
-
- // container node requests EC parent headers, so container node denies access by matching attribute key/value
- t.Run("access denied on container node", func(t *testing.T) {
- prm := Prm{
- Method: nativeschema.MethodGetObject,
- Container: cnr,
- Object: obj,
- Role: role,
- SenderKey: hex.EncodeToString(node2Key.PublicKey().Bytes()),
- Header: chunkHeader,
- }
-
- err = checker.CheckAPE(context.Background(), prm)
- require.Error(t, err)
- })
-
- // non container node has no access rights to collect EC parent header, so it uses EC chunk headers
- t.Run("access allowed on non container node", func(t *testing.T) {
- otherKey, err := keys.NewPrivateKey()
- require.NoError(t, err)
- checker = NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, cs, otherKey.PublicKey().Bytes())
- prm := Prm{
- Method: nativeschema.MethodGetObject,
- Container: cnr,
- Object: obj,
- Role: nativeschema.PropertyValueContainerRoleOthers,
- SenderKey: senderKey,
- Header: chunkHeader,
- }
-
- err = checker.CheckAPE(context.Background(), prm)
- require.NoError(t, err)
- })
-}
diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go
deleted file mode 100644
index 82e660a7f..000000000
--- a/pkg/services/object/ape/errors.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package ape
-
-import (
- "errors"
-
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-var (
- errMissingContainerID = malformedRequestError("missing container ID")
- errEmptyVerificationHeader = malformedRequestError("empty verification header")
- errEmptyBodySig = malformedRequestError("empty at body signature")
- errInvalidSessionSig = malformedRequestError("invalid session token signature")
- errInvalidSessionOwner = malformedRequestError("invalid session token owner")
- errInvalidVerb = malformedRequestError("session token verb is invalid")
-)
-
-func malformedRequestError(reason string) error {
- invalidArgErr := &apistatus.InvalidArgument{}
- invalidArgErr.SetMessage(reason)
- return invalidArgErr
-}
-
-func toStatusErr(err error) error {
- var chRouterErr *checkercore.ChainRouterError
- if !errors.As(err, &chRouterErr) {
- errServerInternal := &apistatus.ServerInternal{}
- apistatus.WriteInternalServerErr(errServerInternal, err)
- return errServerInternal
- }
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason("ape denied request: " + err.Error())
- return errAccessDenied
-}
diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go
deleted file mode 100644
index b37c3b6f8..000000000
--- a/pkg/services/object/ape/metadata.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package ape
-
-import (
- "context"
- "encoding/hex"
- "errors"
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-type Metadata struct {
- Container cid.ID
- Object *oid.ID
- MetaHeader *session.RequestMetaHeader
- VerificationHeader *session.RequestVerificationHeader
- SessionToken *sessionSDK.Object
- BearerToken *bearer.Token
-}
-
-func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
- if m.VerificationHeader == nil {
- return nil, nil, errEmptyVerificationHeader
- }
-
- if m.BearerToken != nil && m.BearerToken.Impersonate() {
- return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
- }
-
- // if session token is presented, use it as truth source
- if m.SessionToken != nil {
- // verify signature of session token
- return ownerFromToken(m.SessionToken)
- }
-
- // otherwise get original body signature
- bodySignature := originalBodySignature(m.VerificationHeader)
- if bodySignature == nil {
- return nil, nil, errEmptyBodySig
- }
-
- return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
-}
-
-// RequestInfo contains request information extracted by request metadata.
-type RequestInfo struct {
- // Role defines under which role this request is executed.
- // It must be represented only as a constant represented in native schema.
- Role string
-
- ContainerOwner user.ID
-
- // Namespace defines to which namespace a container is belonged.
- Namespace string
-
- // HEX-encoded sender key.
- SenderKey string
-}
-
-type RequestInfoExtractor interface {
- GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
-}
-
-type extractor struct {
- containers container.Source
-
- nm netmap.Source
-
- classifier objectCore.SenderClassifier
-}
-
-func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
- return &extractor{
- containers: containers,
- nm: nm,
- classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
- }
-}
-
-func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
- currentEpoch, err := e.nm.Epoch(ctx)
- if err != nil {
- return errors.New("can't fetch current epoch")
- }
- if sessionToken.ExpiredAt(currentEpoch) {
- return new(apistatus.SessionTokenExpired)
- }
- if sessionToken.InvalidAt(currentEpoch) {
- return fmt.Errorf("malformed request: token is invalid at %d epoch)", currentEpoch)
- }
- if !assertVerb(*sessionToken, method) {
- return errInvalidVerb
- }
- return nil
-}
-
-func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
- cnr, err := e.containers.Get(ctx, m.Container)
- if err != nil {
- return ri, err
- }
-
- if m.SessionToken != nil {
- if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
- return ri, err
- }
- }
-
- ownerID, ownerKey, err := m.RequestOwner()
- if err != nil {
- return ri, err
- }
- res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
- if err != nil {
- return ri, err
- }
-
- ri.Role = nativeSchemaRole(res.Role)
- ri.ContainerOwner = cnr.Value.Owner()
-
- cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
- if hasNamespace {
- ri.Namespace = cnrNamespace
- }
-
- // it is assumed that at the moment the key will be valid,
- // otherwise the request would not pass validation
- ri.SenderKey = hex.EncodeToString(res.Key)
-
- return ri, nil
-}
-
-func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
- var sTok *sessionSDK.Object
-
- if tokV2 != nil {
- sTok = new(sessionSDK.Object)
-
- err := sTok.ReadFromV2(*tokV2)
- if err != nil {
- return nil, fmt.Errorf("invalid session token: %w", err)
- }
-
- if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
- // if session relates to object's removal, we don't check
- // relation of the tombstone to the session here since user
- // can't predict tomb's ID.
- err = assertSessionRelation(*sTok, cnr, nil)
- } else {
- err = assertSessionRelation(*sTok, cnr, obj)
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- return sTok, nil
-}
diff --git a/pkg/services/object/ape/metadata_test.go b/pkg/services/object/ape/metadata_test.go
deleted file mode 100644
index fd919008f..000000000
--- a/pkg/services/object/ape/metadata_test.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package ape
-
-import (
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/signature"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/google/uuid"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-func TestRequestOwner(t *testing.T) {
- containerOwner, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- userPk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- var userID user.ID
- user.IDFromKey(&userID, userPk.PrivateKey.PublicKey)
-
- var userSignature refs.Signature
- userSignature.SetKey(userPk.PublicKey().Bytes())
-
- vh := new(sessionV2.RequestVerificationHeader)
- vh.SetBodySignature(&userSignature)
-
- t.Run("empty verification header", func(t *testing.T) {
- req := Metadata{}
- checkOwner(t, req, nil, errEmptyVerificationHeader)
- })
- t.Run("empty verification header signature", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: new(sessionV2.RequestVerificationHeader),
- }
- checkOwner(t, req, nil, errEmptyBodySig)
- })
- t.Run("no tokens", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
- }
- checkOwner(t, req, userPk.PublicKey(), nil)
- })
-
- t.Run("bearer without impersonate, no session", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
- BearerToken: newBearer(t, containerOwner, userID, false),
- }
- checkOwner(t, req, userPk.PublicKey(), nil)
- })
- t.Run("bearer with impersonate, no session", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
- BearerToken: newBearer(t, containerOwner, userID, true),
- }
- checkOwner(t, req, containerOwner.PublicKey(), nil)
- })
- t.Run("bearer with impersonate, with session", func(t *testing.T) {
- // To check that bearer token takes priority, use different key to sign session token.
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- req := Metadata{
- VerificationHeader: vh,
- BearerToken: newBearer(t, containerOwner, userID, true),
- SessionToken: newSession(t, pk),
- }
- checkOwner(t, req, containerOwner.PublicKey(), nil)
- })
- t.Run("with session", func(t *testing.T) {
- req := Metadata{
- VerificationHeader: vh,
- SessionToken: newSession(t, containerOwner),
- }
- checkOwner(t, req, containerOwner.PublicKey(), nil)
- })
- t.Run("malformed session token", func(t *testing.T) {
- // This test is tricky: session token has issuer field and signature, which must correspond to each other.
- // SDK prevents constructing such token in the first place, but it is still possible via API.
- // Thus, construct v2 token, convert it to SDK one and pass to our function.
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- var user1 user.ID
- user.IDFromKey(&user1, pk.PrivateKey.PublicKey)
-
- var id refs.OwnerID
- id.SetValue(user1.WalletBytes())
-
- raw, err := uuid.New().MarshalBinary()
- require.NoError(t, err)
-
- var cidV2 refs.ContainerID
- cidtest.ID().WriteToV2(&cidV2)
-
- sessionCtx := new(sessionV2.ObjectSessionContext)
- sessionCtx.SetTarget(&cidV2)
-
- var body sessionV2.TokenBody
- body.SetOwnerID(&id)
- body.SetID(raw)
- body.SetLifetime(new(sessionV2.TokenLifetime))
- body.SetSessionKey(pk.PublicKey().Bytes())
- body.SetContext(sessionCtx)
-
- var tokV2 sessionV2.Token
- tokV2.SetBody(&body)
- require.NoError(t, sigutilV2.SignData(&containerOwner.PrivateKey, smWrapper{Token: &tokV2}))
- require.NoError(t, sigutilV2.VerifyData(smWrapper{Token: &tokV2}))
-
- var tok sessionSDK.Object
- require.NoError(t, tok.ReadFromV2(tokV2))
-
- req := Metadata{
- VerificationHeader: vh,
- SessionToken: &tok,
- }
- checkOwner(t, req, nil, errInvalidSessionOwner)
- })
-}
-
-type smWrapper struct {
- *sessionV2.Token
-}
-
-func (s smWrapper) ReadSignedData(data []byte) ([]byte, error) {
- return s.Token.GetBody().StableMarshal(data), nil
-}
-
-func (s smWrapper) SignedDataSize() int {
- return s.Token.GetBody().StableSize()
-}
-
-func newSession(t *testing.T, pk *keys.PrivateKey) *sessionSDK.Object {
- var tok sessionSDK.Object
- require.NoError(t, tok.Sign(pk.PrivateKey))
- return &tok
-}
-
-func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool) *bearer.Token {
- var tok bearer.Token
- tok.SetImpersonate(impersonate)
- tok.ForUser(user)
- require.NoError(t, tok.Sign(pk.PrivateKey))
- return &tok
-}
-
-func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) {
- _, actual, err := req.RequestOwner()
- if expectedErr != nil {
- require.ErrorIs(t, err, expectedErr)
- return
- }
-
- require.NoError(t, err)
- require.Equal(t, expected, actual)
-}
diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go
deleted file mode 100644
index 001a5f71e..000000000
--- a/pkg/services/object/ape/request.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package ape
-
-import (
- "context"
- "crypto/sha256"
- "errors"
- "fmt"
- "net"
- "strconv"
-
- aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "google.golang.org/grpc/peer"
-)
-
-var defaultRequest = aperequest.Request{}
-
-var errECMissingParentObjectID = errors.New("missing EC parent object ID")
-
-func nativeSchemaRole(role acl.Role) string {
- switch role {
- case acl.RoleOwner:
- return nativeschema.PropertyValueContainerRoleOwner
- case acl.RoleContainer:
- return nativeschema.PropertyValueContainerRoleContainer
- case acl.RoleInnerRing:
- return nativeschema.PropertyValueContainerRoleIR
- case acl.RoleOthers:
- return nativeschema.PropertyValueContainerRoleOthers
- default:
- return ""
- }
-}
-
-func resourceName(cid cid.ID, oid *oid.ID, namespace string) string {
- if namespace == "root" || namespace == "" {
- if oid != nil {
- return fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, cid.EncodeToString(), oid.EncodeToString())
- }
- return fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cid.EncodeToString())
- }
- if oid != nil {
- return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObject, namespace, cid.EncodeToString(), oid.EncodeToString())
- }
- return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString())
-}
-
-// objectProperties collects object properties from address parameters and a header if it is passed.
-func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV2.Header) map[string]string {
- objectProps := map[string]string{
- nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(),
- }
-
- objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString()
-
- if oid != nil {
- objectProps[nativeschema.PropertyKeyObjectID] = oid.String()
- }
-
- if header == nil {
- return objectProps
- }
-
- objV2 := new(objectV2.Object)
- objV2.SetHeader(header)
- objSDK := objectSDK.NewFromV2(objV2)
-
- objectProps[nativeschema.PropertyKeyObjectVersion] = objSDK.Version().String()
- objectProps[nativeschema.PropertyKeyObjectOwnerID] = objSDK.OwnerID().EncodeToString()
- objectProps[nativeschema.PropertyKeyObjectCreationEpoch] = strconv.Itoa(int(objSDK.CreationEpoch()))
- objectProps[nativeschema.PropertyKeyObjectPayloadLength] = strconv.Itoa(int(objSDK.PayloadSize()))
- objectProps[nativeschema.PropertyKeyObjectType] = objSDK.Type().String()
-
- pcs, isSet := objSDK.PayloadChecksum()
- if isSet {
- objectProps[nativeschema.PropertyKeyObjectPayloadHash] = pcs.String()
- }
- hcs, isSet := objSDK.PayloadHomomorphicHash()
- if isSet {
- objectProps[nativeschema.PropertyKeyObjectHomomorphicHash] = hcs.String()
- }
-
- for _, attr := range header.GetAttributes() {
- objectProps[attr.GetKey()] = attr.GetValue()
- }
-
- return objectProps
-}
-
-// newAPERequest creates an APE request to be passed to a chain router. It collects resource properties from
-// header provided by headerProvider. If it cannot be found in headerProvider, then properties are
-// initialized from header given in prm (if it is set). Otherwise, just CID and OID are set to properties.
-func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Request, error) {
- switch prm.Method {
- case nativeschema.MethodGetObject,
- nativeschema.MethodHeadObject,
- nativeschema.MethodRangeObject,
- nativeschema.MethodHashObject,
- nativeschema.MethodDeleteObject,
- nativeschema.MethodPatchObject:
- if prm.Object == nil {
- return defaultRequest, fmt.Errorf("method %s: %w", prm.Method, errMissingOID)
- }
- case nativeschema.MethodSearchObject, nativeschema.MethodPutObject:
- default:
- return defaultRequest, fmt.Errorf("unknown method: %s", prm.Method)
- }
-
- var header *objectV2.Header
- if prm.Header != nil {
- header = prm.Header
- } else if prm.Object != nil {
- headerObjSDK, err := c.headerProvider.GetHeader(ctx, prm.Container, *prm.Object, true)
- if err == nil {
- header = headerObjSDK.ToV2().GetHeader()
- }
- }
- header, err := c.fillHeaderWithECParent(ctx, prm, header)
- if err != nil {
- return defaultRequest, fmt.Errorf("get EC parent header: %w", err)
- }
- reqProps := map[string]string{
- nativeschema.PropertyKeyActorPublicKey: prm.SenderKey,
- nativeschema.PropertyKeyActorRole: prm.Role,
- }
-
- for _, xhead := range prm.XHeaders {
- xheadKey := fmt.Sprintf(commonschema.PropertyKeyFrostFSXHeader, xhead.GetKey())
- reqProps[xheadKey] = xhead.GetValue()
- }
-
- reqProps, err = c.fillWithUserClaimTags(ctx, reqProps, prm)
- if err != nil {
- return defaultRequest, err
- }
-
- if p, ok := peer.FromContext(ctx); ok {
- if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
- reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
- }
- }
-
- return aperequest.NewRequest(
- prm.Method,
- aperequest.NewResource(
- resourceName(prm.Container, prm.Object, prm.Namespace),
- objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header),
- ),
- reqProps,
- ), nil
-}
-
-func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) (*objectV2.Header, error) {
- if header == nil {
- return header, nil
- }
- if header.GetEC() == nil {
- return header, nil
- }
- parentObjRefID := header.GetEC().Parent
- if parentObjRefID == nil {
- return nil, errECMissingParentObjectID
- }
- var parentObjID oid.ID
- if err := parentObjID.ReadFromV2(*parentObjRefID); err != nil {
- return nil, fmt.Errorf("EC parent object ID format error: %w", err)
- }
- // only container node have access to collect parent object
- contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container)
- if err != nil {
- return nil, fmt.Errorf("check container node status: %w", err)
- }
- if !contNode {
- return header, nil
- }
- parentObj, err := c.headerProvider.GetHeader(ctx, prm.Container, parentObjID, false)
- if err != nil {
- if isLogicalError(err) {
- return header, nil
- }
- return nil, fmt.Errorf("EC parent header request: %w", err)
- }
- return parentObj.ToV2().GetHeader(), nil
-}
-
-func isLogicalError(err error) bool {
- var errObjRemoved *apistatus.ObjectAlreadyRemoved
- var errObjNotFound *apistatus.ObjectNotFound
- return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound)
-}
-
-func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) {
- cnr, err := c.cnrSource.Get(ctx, cnrID)
- if err != nil {
- return false, err
- }
-
- nm, err := netmap.GetLatestNetworkMap(ctx, c.nm)
- if err != nil {
- return false, err
- }
- idCnr := make([]byte, sha256.Size)
- cnrID.Encode(idCnr)
-
- in, err := object.LookupKeyInContainer(nm, c.nodePK, idCnr, cnr.Value)
- if err != nil {
- return false, err
- } else if in {
- return true, nil
- }
-
- nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm)
- if err != nil {
- return false, err
- }
-
- return object.LookupKeyInContainer(nm, c.nodePK, idCnr, cnr.Value)
-}
-
-// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
-func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) {
- if reqProps == nil {
- reqProps = make(map[string]string)
- }
- pk, err := keys.NewPublicKeyFromString(prm.SenderKey)
- if err != nil {
- return nil, err
- }
- props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk)
- if err != nil {
- return reqProps, err
- }
- for propertyName, properyValue := range props {
- reqProps[propertyName] = properyValue
- }
- return reqProps, nil
-}
diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go
deleted file mode 100644
index f270bf97d..000000000
--- a/pkg/services/object/ape/request_test.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package ape
-
-import (
- "context"
- "fmt"
- "net"
- "testing"
-
- aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/stretchr/testify/require"
- "google.golang.org/grpc/peer"
-)
-
-const (
- testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y"
-
- incomingIP = "192.92.33.1"
-)
-
-func ctxWithPeerInfo() context.Context {
- return peer.NewContext(context.Background(), &peer.Peer{
- Addr: &net.TCPAddr{
- IP: net.ParseIP(incomingIP),
- Port: 41111,
- },
- })
-}
-
-func TestObjectProperties(t *testing.T) {
- for _, test := range []struct {
- name string
- container string
- object *string
- header *headerObjectSDKParams
- }{
- {
- name: "fully filled header",
- container: containerID,
- object: stringPtr(objectID),
- header: &headerObjectSDKParams{
- majorVersion: 1,
- minorVersion: 1,
- owner: usertest.ID(),
- epoch: 3,
- payloadSize: 1000,
- typ: objectSDK.TypeRegular,
- payloadChecksum: checksumtest.Checksum(),
- payloadHomomorphicHash: checksumtest.Checksum(),
- attributes: []struct {
- key string
- val string
- }{
- {
- key: "attr1",
- val: "val1",
- },
- {
- key: "attr2",
- val: "val2",
- },
- },
- },
- },
- {
- name: "partially filled header",
- container: containerID,
- header: &headerObjectSDKParams{
- majorVersion: 1,
- minorVersion: 1,
- owner: usertest.ID(),
- epoch: 3,
- attributes: []struct {
- key string
- val string
- }{
- {
- key: "attr1",
- val: "val1",
- },
- },
- },
- },
- {
- name: "only address paramaters set in header",
- container: containerID,
- object: stringPtr(objectID),
- },
- {
- name: "only container set in header",
- container: containerID,
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- cnr := newContainerIDSDK(t, test.container)
- obj := newObjectIDSDK(t, test.object)
- header := newHeaderObjectSDK(cnr, obj, test.header)
-
- var testCnrOwner user.ID
- require.NoError(t, testCnrOwner.DecodeString(testOwnerID))
-
- props := objectProperties(cnr, obj, testCnrOwner, header.ToV2().GetHeader())
- require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID])
- require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID])
-
- if obj != nil {
- require.Equal(t, *test.object, props[nativeschema.PropertyKeyObjectID])
- }
-
- if test.header != nil {
- require.Equal(t,
- fmt.Sprintf("v%d.%d", test.header.majorVersion, test.header.minorVersion),
- props[nativeschema.PropertyKeyObjectVersion],
- )
- require.Equal(t, test.header.owner.EncodeToString(), props[nativeschema.PropertyKeyObjectOwnerID])
- require.Equal(t, fmt.Sprintf("%d", test.header.epoch), props[nativeschema.PropertyKeyObjectCreationEpoch])
- require.Equal(t, fmt.Sprintf("%d", test.header.payloadSize), props[nativeschema.PropertyKeyObjectPayloadLength])
- require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType])
- require.Equal(t, test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash])
- require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash])
-
- for _, attr := range test.header.attributes {
- require.Equal(t, attr.val, props[attr.key])
- }
- }
- })
- }
-}
-
-func TestNewAPERequest(t *testing.T) {
- tests := []struct {
- name string
- methods []string
- namespace string
- container string
- object *string
- header testHeader
- expectErr error
- }{
- {
- name: "oid required requests",
- methods: methodsRequiredOID,
- namespace: namespace,
- container: containerID,
- object: stringPtr(objectID),
- header: testHeader{
- headerObjSDK: &headerObjectSDKParams{
- majorVersion: 1,
- minorVersion: 1,
- owner: usertest.ID(),
- epoch: 3,
- payloadSize: 1000,
- typ: objectSDK.TypeRegular,
- payloadChecksum: checksumtest.Checksum(),
- payloadHomomorphicHash: checksumtest.Checksum(),
- },
- fromHeaderProvider: true,
- },
- },
- {
- name: "oid required requests but header cannot be found locally",
- methods: methodsRequiredOID,
- namespace: namespace,
- container: containerID,
- object: stringPtr(objectID),
- header: testHeader{},
- },
- {
- name: "oid required requests missed oid",
- methods: methodsRequiredOID,
- namespace: namespace,
- container: containerID,
- object: nil,
- header: testHeader{},
- expectErr: errMissingOID,
- },
- {
- name: "response for oid required requests",
- methods: methodsRequiredOID,
- namespace: namespace,
- container: containerID,
- object: stringPtr(objectID),
- header: testHeader{
- headerObjSDK: &headerObjectSDKParams{
- majorVersion: 1,
- minorVersion: 1,
- owner: usertest.ID(),
- epoch: 3,
- payloadSize: 1000,
- typ: objectSDK.TypeRegular,
- payloadChecksum: checksumtest.Checksum(),
- payloadHomomorphicHash: checksumtest.Checksum(),
- },
- fromRequestResponseHeader: true,
- },
- },
- {
- name: "oid not required methods request",
- methods: methodsOptionalOID,
- namespace: namespace,
- container: containerID,
- object: nil,
- header: testHeader{
- headerObjSDK: &headerObjectSDKParams{
- majorVersion: 6,
- minorVersion: 66,
- owner: usertest.ID(),
- epoch: 3,
- typ: objectSDK.TypeLock,
- },
- fromRequestResponseHeader: true,
- },
- },
- {
- name: "oid not required methods request but no header",
- methods: methodsOptionalOID,
- namespace: namespace,
- container: containerID,
- object: nil,
- header: testHeader{},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- for _, method := range test.methods {
- t.Run(method, func(t *testing.T) {
- cnr := newContainerIDSDK(t, test.container)
- obj := newObjectIDSDK(t, test.object)
-
- var testCnrOwner user.ID
- require.NoError(t, testCnrOwner.DecodeString(testOwnerID))
-
- prm := Prm{
- Namespace: test.namespace,
- Method: method,
- Container: cnr,
- Object: obj,
- Role: role,
- SenderKey: senderKey,
- ContainerOwner: testCnrOwner,
- }
-
- headerSource := newHeaderProviderMock()
- ffidProvider := newFrostfsIDProviderMock(t)
-
- var headerObjSDK *objectSDK.Object
- if test.header.headerObjSDK != nil {
- headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK)
- if test.header.fromHeaderProvider {
- require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider")
- headerSource.addHeader(cnr, *obj, headerObjSDK)
- } else if test.header.fromRequestResponseHeader {
- prm.Header = headerObjSDK.ToV2().GetHeader()
- }
- }
-
- c := checkerImpl{
- headerProvider: headerSource,
- frostFSIDClient: ffidProvider,
- }
-
- r, err := c.newAPERequest(ctxWithPeerInfo(), prm)
- if test.expectErr != nil {
- require.Error(t, err)
- require.ErrorIs(t, err, test.expectErr)
- return
- }
-
- expectedRequest := aperequest.NewRequest(
- method,
- aperequest.NewResource(
- resourceName(cnr, obj, prm.Namespace),
- objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header {
- if headerObjSDK != nil {
- return headerObjSDK.ToV2().GetHeader()
- }
- return prm.Header
- }())),
- map[string]string{
- nativeschema.PropertyKeyActorPublicKey: prm.SenderKey,
- nativeschema.PropertyKeyActorRole: prm.Role,
- fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, "tag-attr1"): "value1",
- fmt.Sprintf(commonschema.PropertyKeyFormatFrostFSIDUserClaim, "tag-attr2"): "value2",
- commonschema.PropertyKeyFrostFSIDGroupID: "1",
- commonschema.PropertyKeyFrostFSSourceIP: incomingIP,
- },
- )
-
- require.Equal(t, expectedRequest, r)
- })
- }
- })
- }
-}
-
-func TestResourceName(t *testing.T) {
- for _, test := range []struct {
- name string
- namespace string
- container string
- object *string
- expected string
- }{
- {
- name: "non-root namespace, CID",
- namespace: namespace,
- container: containerID,
- expected: fmt.Sprintf("native:object/%s/%s/*", namespace, containerID),
- },
- {
- name: "non-root namespace, CID, OID",
- namespace: namespace,
- container: containerID,
- object: stringPtr(objectID),
- expected: fmt.Sprintf("native:object/%s/%s/%s", namespace, containerID, objectID),
- },
- {
- name: "empty namespace, CID",
- namespace: "",
- container: containerID,
- expected: fmt.Sprintf("native:object//%s/*", containerID),
- },
- {
- name: "empty namespace, CID, OID",
- namespace: "",
- container: containerID,
- object: stringPtr(objectID),
- expected: fmt.Sprintf("native:object//%s/%s", containerID, objectID),
- },
- {
- name: "root namespace, CID",
- namespace: "root",
- container: containerID,
- expected: fmt.Sprintf("native:object//%s/*", containerID),
- },
- {
- name: "root namespace, CID, OID",
- namespace: "root",
- container: containerID,
- object: stringPtr(objectID),
- expected: fmt.Sprintf("native:object//%s/%s", containerID, objectID),
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- cnr := newContainerIDSDK(t, test.container)
- obj := newObjectIDSDK(t, test.object)
- require.Equal(t, test.expected, resourceName(cnr, obj, test.namespace))
- })
- }
-}
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
deleted file mode 100644
index e199e2638..000000000
--- a/pkg/services/object/ape/service.go
+++ /dev/null
@@ -1,471 +0,0 @@
-package ape
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
-)
-
-type Service struct {
- apeChecker Checker
-
- extractor RequestInfoExtractor
-
- next objectSvc.ServiceServer
-}
-
-var _ objectSvc.ServiceServer = (*Service)(nil)
-
-type HeaderProvider interface {
- GetHeader(ctx context.Context, cnr cid.ID, oid oid.ID, local bool) (*objectSDK.Object, error)
-}
-
-type storageEngineHeaderProvider struct {
- storageEngine *engine.StorageEngine
- getSvc *getsvc.Service
-}
-
-func (p storageEngineHeaderProvider) GetHeader(ctx context.Context, cnr cid.ID, objID oid.ID, local bool) (*objectSDK.Object, error) {
- var addr oid.Address
- addr.SetContainer(cnr)
- addr.SetObject(objID)
- if local {
- return engine.Head(ctx, p.storageEngine, addr)
- }
- w := getsvc.NewSimpleObjectWriter()
- var headPrm getsvc.HeadPrm
- headPrm.WithAddress(addr)
- headPrm.SetHeaderWriter(w)
- headPrm.SetCommonParameters(&util.CommonPrm{}) // default values are ok
- if err := p.getSvc.Head(ctx, headPrm); err != nil {
- return nil, err
- }
- return w.Object(), nil
-}
-
-func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service) HeaderProvider {
- return storageEngineHeaderProvider{
- storageEngine: e,
- getSvc: s,
- }
-}
-
-func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service {
- return &Service{
- apeChecker: apeChecker,
- extractor: extractor,
- next: next,
- }
-}
-
-type getStreamBasicChecker struct {
- objectSvc.GetObjectStream
-
- apeChecker Checker
-
- metadata Metadata
-
- reqInfo RequestInfo
-}
-
-func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
- if partInit, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
- cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
- if err != nil {
- return toStatusErr(err)
- }
-
- prm := Prm{
- Namespace: g.reqInfo.Namespace,
- Container: cnrID,
- Object: objID,
- Header: partInit.GetHeader(),
- Method: nativeschema.MethodGetObject,
- SenderKey: g.reqInfo.SenderKey,
- ContainerOwner: g.reqInfo.ContainerOwner,
- Role: g.reqInfo.Role,
- BearerToken: g.metadata.BearerToken,
- XHeaders: resp.GetMetaHeader().GetXHeaders(),
- }
-
- if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil {
- return toStatusErr(err)
- }
- }
- return g.GetObjectStream.Send(resp)
-}
-
-func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
- if err != nil {
- return err
- }
- reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject)
- if err != nil {
- return err
- }
- return c.next.Get(request, &getStreamBasicChecker{
- GetObjectStream: stream,
- apeChecker: c.apeChecker,
- metadata: md,
- reqInfo: reqInfo,
- })
-}
-
-type putStreamBasicChecker struct {
- apeChecker Checker
-
- extractor RequestInfoExtractor
-
- next objectSvc.PutObjectStream
-}
-
-func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
- if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
- md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
- if err != nil {
- return err
- }
- reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
- if err != nil {
- return err
- }
-
- prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Header: partInit.GetHeader(),
- Method: nativeschema.MethodPutObject,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- Role: reqInfo.Role,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
- }
-
- if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
- return toStatusErr(err)
- }
- }
-
- return p.next.Send(ctx, request)
-}
-
-func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
- return p.next.CloseAndRecv(ctx)
-}
-
-func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
- streamer, err := c.next.Put(ctx)
-
- return &putStreamBasicChecker{
- apeChecker: c.apeChecker,
- extractor: c.extractor,
- next: streamer,
- }, err
-}
-
-type patchStreamBasicChecker struct {
- apeChecker Checker
-
- extractor RequestInfoExtractor
-
- next objectSvc.PatchObjectStream
-
- nonFirstSend bool
-}
-
-func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
- if !p.nonFirstSend {
- p.nonFirstSend = true
-
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
- if err != nil {
- return err
- }
- reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject)
- if err != nil {
- return err
- }
-
- prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodPatchObject,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- Role: reqInfo.Role,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
- }
-
- if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
- return toStatusErr(err)
- }
- }
-
- return p.next.Send(ctx, request)
-}
-
-func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
- return p.next.CloseAndRecv(ctx)
-}
-
-func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error) {
- streamer, err := c.next.Patch(ctx)
-
- return &patchStreamBasicChecker{
- apeChecker: c.apeChecker,
- extractor: c.extractor,
- next: streamer,
- }, err
-}
-
-func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
- if err != nil {
- return nil, err
- }
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject)
- if err != nil {
- return nil, err
- }
-
- resp, err := c.next.Head(ctx, request)
- if err != nil {
- return nil, err
- }
-
- header := new(objectV2.Header)
- switch headerPart := resp.GetBody().GetHeaderPart().(type) {
- case *objectV2.ShortHeader:
- cidV2 := new(refs.ContainerID)
- md.Container.WriteToV2(cidV2)
- header.SetContainerID(cidV2)
- header.SetVersion(headerPart.GetVersion())
- header.SetCreationEpoch(headerPart.GetCreationEpoch())
- header.SetOwnerID(headerPart.GetOwnerID())
- header.SetObjectType(headerPart.GetObjectType())
- header.SetHomomorphicHash(header.GetHomomorphicHash())
- header.SetPayloadLength(headerPart.GetPayloadLength())
- header.SetPayloadHash(headerPart.GetPayloadHash())
- case *objectV2.HeaderWithSignature:
- header = headerPart.GetHeader()
- default:
- return resp, nil
- }
-
- err = c.apeChecker.CheckAPE(ctx, Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Header: header,
- Method: nativeschema.MethodHeadObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
- })
- if err != nil {
- return nil, toStatusErr(err)
- }
- return resp, nil
-}
-
-func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
- md, err := newMetadata(request, request.GetBody().GetContainerID(), nil)
- if err != nil {
- return err
- }
- reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject)
- if err != nil {
- return err
- }
-
- err = c.apeChecker.CheckAPE(stream.Context(), Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Method: nativeschema.MethodSearchObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
- })
- if err != nil {
- return toStatusErr(err)
- }
-
- return c.next.Search(request, stream)
-}
-
-func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
- if err != nil {
- return nil, err
- }
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject)
- if err != nil {
- return nil, err
- }
-
- err = c.apeChecker.CheckAPE(ctx, Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodDeleteObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
- })
- if err != nil {
- return nil, toStatusErr(err)
- }
-
- resp, err := c.next.Delete(ctx, request)
- if err != nil {
- return nil, err
- }
-
- return resp, nil
-}
-
-func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
- if err != nil {
- return err
- }
- reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject)
- if err != nil {
- return err
- }
-
- err = c.apeChecker.CheckAPE(stream.Context(), Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodRangeObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
- })
- if err != nil {
- return toStatusErr(err)
- }
-
- return c.next.GetRange(request, stream)
-}
-
-func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
- if err != nil {
- return nil, err
- }
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject)
- if err != nil {
- return nil, err
- }
-
- prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Method: nativeschema.MethodHashObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
- }
-
- resp, err := c.next.GetRangeHash(ctx, request)
- if err != nil {
- return nil, err
- }
-
- if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
- return nil, toStatusErr(err)
- }
- return resp, nil
-}
-
-func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
- md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
- if err != nil {
- return nil, err
- }
- reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
- if err != nil {
- return nil, err
- }
-
- prm := Prm{
- Namespace: reqInfo.Namespace,
- Container: md.Container,
- Object: md.Object,
- Header: request.GetBody().GetObject().GetHeader(),
- Method: nativeschema.MethodPutObject,
- Role: reqInfo.Role,
- SenderKey: reqInfo.SenderKey,
- ContainerOwner: reqInfo.ContainerOwner,
- BearerToken: md.BearerToken,
- XHeaders: md.MetaHeader.GetXHeaders(),
- }
-
- if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
- return nil, toStatusErr(err)
- }
-
- return c.next.PutSingle(ctx, request)
-}
-
-type request interface {
- GetMetaHeader() *session.RequestMetaHeader
- GetVerificationHeader() *session.RequestVerificationHeader
-}
-
-func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) {
- meta := request.GetMetaHeader()
- for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
- meta = origin
- }
-
- cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2)
- if err != nil {
- return
- }
- session, err := readSessionToken(cnrID, objID, meta.GetSessionToken())
- if err != nil {
- return
- }
- bearer, err := originalBearerToken(request.GetMetaHeader())
- if err != nil {
- return
- }
-
- md = Metadata{
- Container: cnrID,
- Object: objID,
- VerificationHeader: request.GetVerificationHeader(),
- SessionToken: session,
- BearerToken: bearer,
- }
- return
-}
diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go
deleted file mode 100644
index 97dbfa658..000000000
--- a/pkg/services/object/ape/types.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package ape
-
-import "context"
-
-// Checker provides methods to check requests and responses
-// with access policy engine.
-type Checker interface {
- CheckAPE(context.Context, Prm) error
-}
-
-// InnerRingFetcher is an interface that must provide
-// Inner Ring information.
-type InnerRingFetcher interface {
- // InnerRingKeys must return list of public keys of
- // the actual inner ring.
- InnerRingKeys(ctx context.Context) ([][]byte, error)
-}
diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go
deleted file mode 100644
index 916bce427..000000000
--- a/pkg/services/object/ape/util_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package ape
-
-import (
- "slices"
- "testing"
-
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/stretchr/testify/require"
-)
-
-func TestIsVerbCompatible(t *testing.T) {
- table := map[string][]sessionSDK.ObjectVerb{
- nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch},
- nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete},
- nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet},
- nativeschema.MethodHeadObject: {
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- sessionSDK.VerbObjectPatch,
- },
- nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch},
- nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash},
- nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
- nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch},
- }
-
- verbs := []sessionSDK.ObjectVerb{
- sessionSDK.VerbObjectPut,
- sessionSDK.VerbObjectDelete,
- sessionSDK.VerbObjectHead,
- sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash,
- sessionSDK.VerbObjectGet,
- sessionSDK.VerbObjectSearch,
- sessionSDK.VerbObjectPatch,
- }
-
- var tok sessionSDK.Object
-
- for op, list := range table {
- for _, verb := range verbs {
- contains := slices.Contains(list, verb)
-
- tok.ForVerb(verb)
-
- require.Equal(t, contains, assertVerb(tok, op),
- "%v in token, %s executing", verb, op)
- }
- }
-}
-
-func TestAssertSessionRelation(t *testing.T) {
- var tok sessionSDK.Object
- cnr := cidtest.ID()
- cnrOther := cidtest.ID()
- obj := oidtest.ID()
- objOther := oidtest.ID()
-
- // make sure ids differ, otherwise test won't work correctly
- require.False(t, cnrOther.Equals(cnr))
- require.False(t, objOther.Equals(obj))
-
- // bind session to the container (required)
- tok.BindContainer(cnr)
-
- // test container-global session
- require.NoError(t, assertSessionRelation(tok, cnr, nil))
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnrOther, nil))
- require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
-
- // limit the session to the particular object
- tok.LimitByObjects(obj)
-
- // test fixed object session (here obj arg must be non-nil everywhere)
- require.NoError(t, assertSessionRelation(tok, cnr, &obj))
- require.Error(t, assertSessionRelation(tok, cnr, &objOther))
-}
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
deleted file mode 100644
index f8ee089fe..000000000
--- a/pkg/services/object/audit.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package object
-
-import (
- "context"
- "errors"
- "sync/atomic"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-var _ ServiceServer = (*auditService)(nil)
-
-type auditService struct {
- next ServiceServer
- log *logger.Logger
- enabled *atomic.Bool
-}
-
-func NewAuditService(next ServiceServer, log *logger.Logger, enabled *atomic.Bool) ServiceServer {
- return &auditService{
- next: next,
- log: log,
- enabled: enabled,
- }
-}
-
-// Delete implements ServiceServer.
-func (a *auditService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
- res, err := a.next.Delete(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
- return res, err
-}
-
-// Get implements ServiceServer.
-func (a *auditService) Get(req *object.GetRequest, stream GetObjectStream) error {
- err := a.next.Get(req, stream)
- if !a.enabled.Load() {
- return err
- }
- audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
- return err
-}
-
-// GetRange implements ServiceServer.
-func (a *auditService) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error {
- err := a.next.GetRange(req, stream)
- if !a.enabled.Load() {
- return err
- }
- audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
- return err
-}
-
-// GetRangeHash implements ServiceServer.
-func (a *auditService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
- resp, err := a.next.GetRangeHash(ctx, req)
- if !a.enabled.Load() {
- return resp, err
- }
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
- return resp, err
-}
-
-// Head implements ServiceServer.
-func (a *auditService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
- resp, err := a.next.Head(ctx, req)
- if !a.enabled.Load() {
- return resp, err
- }
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
- return resp, err
-}
-
-// Put implements ServiceServer.
-func (a *auditService) Put(ctx context.Context) (PutObjectStream, error) {
- res, err := a.next.Put(ctx)
- if !a.enabled.Load() {
- return res, err
- }
- if err != nil {
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
- return res, err
- }
- return &auditPutStream{
- stream: res,
- log: a.log,
- }, nil
-}
-
-// PutSingle implements ServiceServer.
-func (a *auditService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
- resp, err := a.next.PutSingle(ctx, req)
- if !a.enabled.Load() {
- return resp, err
- }
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
- audit.TargetFromContainerIDObjectID(req.GetBody().GetObject().GetHeader().GetContainerID(),
- req.GetBody().GetObject().GetObjectID()),
- err == nil)
- return resp, err
-}
-
-// Search implements ServiceServer.
-func (a *auditService) Search(req *object.SearchRequest, stream SearchStream) error {
- err := a.next.Search(req, stream)
- if !a.enabled.Load() {
- return err
- }
- audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
- return err
-}
-
-var _ PutObjectStream = (*auditPutStream)(nil)
-
-type auditPutStream struct {
- stream PutObjectStream
- log *logger.Logger
-
- failed bool
- key []byte
- containerID *refs.ContainerID
- objectID *refs.ObjectID
-}
-
-// CloseAndRecv implements PutObjectStream.
-func (a *auditPutStream) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
- resp, err := a.stream.CloseAndRecv(ctx)
- if err != nil {
- a.failed = true
- }
- a.objectID = resp.GetBody().GetObjectID()
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
- audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
- !a.failed)
- return resp, err
-}
-
-// Send implements PutObjectStream.
-func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error {
- if partInit, ok := req.GetBody().GetObjectPart().(*object.PutObjectPartInit); ok {
- a.containerID = partInit.GetHeader().GetContainerID()
- a.objectID = partInit.GetObjectID()
- a.key = req.GetVerificationHeader().GetBodySignature().GetKey()
- }
-
- err := a.stream.Send(ctx, req)
- if err != nil {
- a.failed = true
- }
- if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
- audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
- !a.failed)
- }
- return err
-}
-
-type auditPatchStream struct {
- stream PatchObjectStream
- log *logger.Logger
-
- failed bool
- key []byte
- containerID *refs.ContainerID
- objectID *refs.ObjectID
-
- nonFirstSend bool
-}
-
-func (a *auditService) Patch(ctx context.Context) (PatchObjectStream, error) {
- res, err := a.next.Patch(ctx)
- if !a.enabled.Load() {
- return res, err
- }
- if err != nil {
- audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
- return res, err
- }
- return &auditPatchStream{
- stream: res,
- log: a.log,
- }, nil
-}
-
-// CloseAndRecv implements PatchObjectStream.
-func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
- resp, err := a.stream.CloseAndRecv(ctx)
- if err != nil {
- a.failed = true
- }
- a.objectID = resp.GetBody().GetObjectID()
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
- audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
- !a.failed)
- return resp, err
-}
-
-// Send implements PatchObjectStream.
-func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) error {
- if !a.nonFirstSend {
- a.containerID = req.GetBody().GetAddress().GetContainerID()
- a.objectID = req.GetBody().GetAddress().GetObjectID()
- a.key = req.GetVerificationHeader().GetBodySignature().GetKey()
- a.nonFirstSend = true
- }
-
- err := a.stream.Send(ctx, req)
- if err != nil {
- a.failed = true
- }
- if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
- audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
- audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
- !a.failed)
- }
- return err
-}
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index ef65e78bc..73ee9f81b 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -3,7 +3,7 @@ package object
import (
"context"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
@@ -40,20 +40,12 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error {
return x.nextHandler.Get(req, stream)
}
-func (x *Common) Put(ctx context.Context) (PutObjectStream, error) {
+func (x *Common) Put() (PutObjectStream, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
}
- return x.nextHandler.Put(ctx)
-}
-
-func (x *Common) Patch(ctx context.Context) (PatchObjectStream, error) {
- if x.state.IsMaintenance() {
- return nil, new(apistatus.NodeUnderMaintenance)
- }
-
- return x.nextHandler.Patch(ctx)
+ return x.nextHandler.Put()
}
func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
diff --git a/pkg/services/object/common/target/builder.go b/pkg/services/object/common/target/builder.go
deleted file mode 100644
index ea68365a7..000000000
--- a/pkg/services/object/common/target/builder.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package target
-
-import (
- "context"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
-)
-
-var _ transformer.ChunkedObjectWriter = (*inMemoryObjectBuilder)(nil)
-
-type inMemoryObjectBuilder struct {
- objectWriter transformer.ObjectWriter
- payload *payload
-
- obj *objectSDK.Object
-}
-
-func newInMemoryObjectBuilder(objectWriter transformer.ObjectWriter) *inMemoryObjectBuilder {
- return &inMemoryObjectBuilder{
- objectWriter: objectWriter,
- payload: getPayload(),
- }
-}
-
-func (b *inMemoryObjectBuilder) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
- defer func() {
- putPayload(b.payload)
- b.payload = nil
- }()
-
- b.obj.SetPayload(b.payload.Data)
-
- if err := b.objectWriter.WriteObject(ctx, b.obj); err != nil {
- return nil, err
- }
-
- id, _ := b.obj.ID()
- return &transformer.AccessIdentifiers{
- SelfID: id,
- }, nil
-}
-
-func (b *inMemoryObjectBuilder) Write(_ context.Context, p []byte) (int, error) {
- b.payload.Data = append(b.payload.Data, p...)
-
- return len(p), nil
-}
-
-func (b *inMemoryObjectBuilder) WriteHeader(_ context.Context, obj *objectSDK.Object) error {
- b.obj = obj
-
- return nil
-}
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
deleted file mode 100644
index f2bd907db..000000000
--- a/pkg/services/object/common/target/target.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package target
-
-import (
- "context"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
- // prepare needed put parameters
- if err := preparePrm(ctx, &prm); err != nil {
- return nil, fmt.Errorf("could not prepare put parameters: %w", err)
- }
-
- if prm.Header.Signature() != nil {
- return newUntrustedTarget(ctx, &prm)
- }
- return newTrustedTarget(ctx, &prm)
-}
-
-func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
- maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
- if maxPayloadSz == 0 {
- return nil, errors.New("could not obtain max object size parameter")
- }
-
- if prm.SignRequestPrivateKey == nil {
- nodeKey, err := prm.Config.KeyStorage.GetKey(nil)
- if err != nil {
- return nil, err
- }
- prm.SignRequestPrivateKey = nodeKey
- }
-
- // prepare untrusted-Put object target
- return &validatingPreparedTarget{
- nextTarget: newInMemoryObjectBuilder(objectwriter.New(prm)),
- fmt: prm.Config.FormatValidator,
-
- maxPayloadSz: maxPayloadSz,
- }, nil
-}
-
-func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
- prm.Relay = nil // do not relay request without signature
- maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx)
- if maxPayloadSz == 0 {
- return nil, errors.New("could not obtain max object size parameter")
- }
-
- sToken := prm.Common.SessionToken()
-
- // prepare trusted-Put object target
-
- // get private token from local storage
- var sessionInfo *util.SessionInfo
-
- if sToken != nil {
- sessionInfo = &util.SessionInfo{
- ID: sToken.ID(),
- Owner: sToken.Issuer(),
- }
- }
-
- key, err := prm.Config.KeyStorage.GetKey(sessionInfo)
- if err != nil {
- return nil, fmt.Errorf("could not receive session key: %w", err)
- }
-
- // In case session token is missing, the line above returns the default key.
- // If it isn't owner key, replication attempts will fail, thus this check.
- ownerObj := prm.Header.OwnerID()
- if ownerObj.IsEmpty() {
- return nil, errors.New("missing object owner")
- }
-
- if sToken == nil {
- var ownerSession user.ID
- user.IDFromKey(&ownerSession, key.PublicKey)
-
- if !ownerObj.Equals(ownerSession) {
- return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession)
- }
- } else if !ownerObj.Equals(sessionInfo.Owner) {
- return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
- }
-
- if prm.SignRequestPrivateKey == nil {
- prm.SignRequestPrivateKey = key
- }
-
- return &validatingTarget{
- fmt: prm.Config.FormatValidator,
- nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{
- Key: key,
- NextTargetInit: func() transformer.ObjectWriter { return objectwriter.New(prm) },
- NetworkState: prm.Config.NetworkState,
- MaxSize: maxPayloadSz,
- WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.Container),
- SessionToken: sToken,
- }),
- }, nil
-}
-
-func preparePrm(ctx context.Context, prm *objectwriter.Params) error {
- var err error
-
- // get latest network map
- nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource)
- if err != nil {
- return fmt.Errorf("could not get latest network map: %w", err)
- }
-
- idCnr, ok := prm.Header.ContainerID()
- if !ok {
- return errors.New("missing container ID")
- }
-
- // get container to store the object
- cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr)
- if err != nil {
- return fmt.Errorf("could not get container by ID: %w", err)
- }
-
- prm.Container = cnrInfo.Value
-
- // add common options
- prm.TraverseOpts = append(prm.TraverseOpts,
- // set processing container
- placement.ForContainer(prm.Container),
- )
-
- if ech := prm.Header.ECHeader(); ech != nil {
- prm.TraverseOpts = append(prm.TraverseOpts,
- // set identifier of the processing object
- placement.ForObject(ech.Parent()),
- )
- } else if id, ok := prm.Header.ID(); ok {
- prm.TraverseOpts = append(prm.TraverseOpts,
- // set identifier of the processing object
- placement.ForObject(id),
- )
- }
-
- // create placement builder from network map
- builder := placement.NewNetworkMapBuilder(nm)
-
- if prm.Common.LocalOnly() {
- // restrict success count to 1 stored copy (to local storage)
- prm.TraverseOpts = append(prm.TraverseOpts, placement.SuccessAfter(1))
-
- // use local-only placement builder
- builder = util.NewLocalPlacement(builder, prm.Config.NetmapKeys)
- }
-
- // set placement builder
- prm.TraverseOpts = append(prm.TraverseOpts, placement.UseBuilder(builder))
-
- return nil
-}
diff --git a/pkg/services/object/common/writer/dispatcher.go b/pkg/services/object/common/writer/dispatcher.go
deleted file mode 100644
index bb9a54ce9..000000000
--- a/pkg/services/object/common/writer/dispatcher.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package writer
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
-)
-
-var _ transformer.ObjectWriter = (*objectWriterDispatcher)(nil)
-
-type objectWriterDispatcher struct {
- ecWriter transformer.ObjectWriter
- repWriter transformer.ObjectWriter
-}
-
-func (m *objectWriterDispatcher) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
- if object.IsECSupported(obj) {
- return m.ecWriter.WriteObject(ctx, obj)
- }
- return m.repWriter.WriteObject(ctx, obj)
-}
diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go
deleted file mode 100644
index fff58aca7..000000000
--- a/pkg/services/object/common/writer/distributed.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package writer
-
-import (
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-)
-
-type preparedObjectTarget interface {
- WriteObject(context.Context, *objectSDK.Object, object.ContentMeta) error
-}
-
-type distributedWriter struct {
- cfg *Config
-
- placementOpts []placement.Option
-
- obj *objectSDK.Object
- objMeta object.ContentMeta
-
- nodeTargetInitializer func(NodeDescriptor) preparedObjectTarget
-
- relay func(context.Context, NodeDescriptor) error
-
- resetSuccessAfterOnBroadcast bool
-}
-
-// Traversal parameters and state of container.
-type Traversal struct {
- Opts []placement.Option
-
- // need of additional broadcast after the object is saved
- ExtraBroadcastEnabled bool
-
- // container nodes which was processed during the primary object placement
- Exclude map[string]*bool
-
- ResetSuccessAfterOnBroadcast bool
-}
-
-// updates traversal parameters after the primary placement finish and
-// returns true if additional container broadcast is needed.
-func (x *Traversal) submitPrimaryPlacementFinish() bool {
- if x.ExtraBroadcastEnabled {
- // do not track success during container broadcast (best-effort)
- x.Opts = append(x.Opts, placement.WithoutSuccessTracking())
-
- if x.ResetSuccessAfterOnBroadcast {
- x.Opts = append(x.Opts, placement.ResetSuccessAfter())
- }
-
- // avoid 2nd broadcast
- x.ExtraBroadcastEnabled = false
-
- return true
- }
-
- return false
-}
-
-// marks the container node as processed during the primary object placement.
-func (x *Traversal) submitProcessed(n placement.Node, item *bool) {
- if x.ExtraBroadcastEnabled {
- key := string(n.PublicKey())
-
- if x.Exclude == nil {
- x.Exclude = make(map[string]*bool, 1)
- }
-
- x.Exclude[key] = item
- }
-}
-
-type NodeDescriptor struct {
- Local bool
-
- Info placement.Node
-}
-
-// errIncompletePut is returned if processing on a container fails.
-type errIncompletePut struct {
- singleErr error // error from the last responding node
-}
-
-func (x errIncompletePut) Error() string {
- const commonMsg = "incomplete object PUT by placement"
-
- if x.singleErr != nil {
- return fmt.Sprintf("%s: %v", commonMsg, x.singleErr)
- }
-
- return commonMsg
-}
-
-func (x errIncompletePut) Unwrap() error {
- return x.singleErr
-}
-
-// WriteObject implements the transformer.ObjectWriter interface.
-func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
- t.obj = obj
-
- var err error
-
- if t.objMeta, err = t.cfg.FormatValidator.ValidateContent(t.obj); err != nil {
- return fmt.Errorf("(%T) could not validate payload content: %w", t, err)
- }
- return t.iteratePlacement(ctx)
-}
-
-func (t *distributedWriter) sendObject(ctx context.Context, node NodeDescriptor) error {
- if !node.Local && t.relay != nil {
- return t.relay(ctx, node)
- }
-
- target := t.nodeTargetInitializer(node)
-
- err := target.WriteObject(ctx, t.obj, t.objMeta)
- if err != nil {
- return fmt.Errorf("could not write header: %w", err)
- }
- return nil
-}
-
-func (t *distributedWriter) iteratePlacement(ctx context.Context) error {
- id, _ := t.obj.ID()
-
- iter := t.cfg.NewNodeIterator(append(t.placementOpts, placement.ForObject(id)))
- iter.ExtraBroadcastEnabled = NeedAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */)
- iter.ResetSuccessAfterOnBroadcast = t.resetSuccessAfterOnBroadcast
- return iter.ForEachNode(ctx, t.sendObject)
-}
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
deleted file mode 100644
index 26a53e315..000000000
--- a/pkg/services/object/common/writer/ec.go
+++ /dev/null
@@ -1,355 +0,0 @@
-package writer
-
-import (
- "context"
- "crypto/ecdsa"
- "encoding/hex"
- "errors"
- "fmt"
- "sync/atomic"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
- svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
- "go.uber.org/zap"
- "golang.org/x/sync/errgroup"
-)
-
-var _ transformer.ObjectWriter = (*ECWriter)(nil)
-
-var (
- errUnsupportedECObject = errors.New("object is not supported for erasure coding")
- errFailedToSaveAllECParts = errors.New("failed to save all EC parts")
-)
-
-type ECWriter struct {
- Config *Config
- PlacementOpts []placement.Option
- Container containerSDK.Container
- Key *ecdsa.PrivateKey
- CommonPrm *svcutil.CommonPrm
- Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
-
- ObjectMeta object.ContentMeta
- ObjectMetaValid bool
-
- remoteRequestSignKey *ecdsa.PrivateKey
-}
-
-func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
- relayed, isContainerNode, err := e.relayIfNotContainerNode(ctx, obj)
- if err != nil {
- return err
- }
- if relayed {
- return nil
- }
-
- if !object.IsECSupported(obj) {
- // must be resolved by caller
- return errUnsupportedECObject
- }
-
- if !e.ObjectMetaValid {
- if e.ObjectMeta, err = e.Config.FormatValidator.ValidateContent(obj); err != nil {
- return fmt.Errorf("(%T) could not validate payload content: %w", e, err)
- }
- e.ObjectMetaValid = true
- }
-
- if isContainerNode {
- restoreTokens := e.CommonPrm.ForgetTokens()
- defer restoreTokens()
- // As request executed on container node, so sign request with container key.
- e.remoteRequestSignKey, err = e.Config.KeyStorage.GetKey(nil)
- if err != nil {
- return err
- }
- } else {
- e.remoteRequestSignKey = e.Key
- }
-
- if obj.ECHeader() != nil {
- return e.writeECPart(ctx, obj)
- }
- return e.writeRawObject(ctx, obj)
-}
-
-func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) {
- currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx)
- if err != nil {
- return false, false, err
- }
- if currentNodeIsContainerNode {
- // object can be splitted or saved local
- return false, true, nil
- }
- if e.Relay == nil {
- return false, currentNodeIsContainerNode, nil
- }
- objID := object.AddressOf(obj).Object()
- var index uint32
- if obj.ECHeader() != nil {
- objID = obj.ECHeader().Parent()
- index = obj.ECHeader().Index()
- }
- if err := e.relayToContainerNode(ctx, objID, index); err != nil {
- return false, false, err
- }
- return true, currentNodeIsContainerNode, nil
-}
-
-func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) {
- t, err := placement.NewTraverser(ctx, e.PlacementOpts...)
- if err != nil {
- return false, err
- }
- for {
- nodes := t.Next()
- if len(nodes) == 0 {
- break
- }
- for _, node := range nodes {
- if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) {
- return true, nil
- }
- }
- }
- return false, nil
-}
-
-func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
- t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
- if err != nil {
- return err
- }
- var lastErr error
- offset := int(index)
- for {
- nodes := t.Next()
- if len(nodes) == 0 {
- break
- }
- for idx := range nodes {
- node := nodes[(idx+offset)%len(nodes)]
- var info client.NodeInfo
- client.NodeInfoFromNetmapElement(&info, node)
-
- c, err := e.Config.ClientConstructor.Get(info)
- if err != nil {
- return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
- }
-
- err = e.Relay(ctx, info, c)
- if err == nil {
- return nil
- }
- e.Config.Logger.Warn(ctx, logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
- lastErr = err
- }
- }
- if lastErr == nil {
- return nil
- }
- return errIncompletePut{
- singleErr: lastErr,
- }
-}
-
-func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error {
- if e.CommonPrm.LocalOnly() {
- return e.writePartLocal(ctx, obj)
- }
-
- t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
- if err != nil {
- return err
- }
-
- eg, egCtx := errgroup.WithContext(ctx)
- for {
- nodes := t.Next()
- if len(nodes) == 0 {
- break
- }
-
- eg.Go(func() error {
- return e.writePart(egCtx, obj, int(obj.ECHeader().Index()), nodes, make([]atomic.Bool, len(nodes)))
- })
- t.SubmitSuccess()
- }
- if err := eg.Wait(); err != nil {
- return errIncompletePut{
- singleErr: err,
- }
- }
- return nil
-}
-
-func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) error {
- // now only single EC policy is supported
- c, err := erasurecode.NewConstructor(policy.ECDataCount(e.Container.PlacementPolicy()), policy.ECParityCount(e.Container.PlacementPolicy()))
- if err != nil {
- return err
- }
- parts, err := c.Split(obj, e.Key)
- if err != nil {
- return err
- }
- partsProcessed := make([]atomic.Bool, len(parts))
- objID, _ := obj.ID()
- t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...)
- if err != nil {
- return err
- }
-
- for {
- eg, egCtx := errgroup.WithContext(ctx)
- nodes := t.Next()
- if len(nodes) == 0 {
- break
- }
-
- visited := make([]atomic.Bool, len(nodes))
- for idx := range parts {
- visited[idx%len(nodes)].Store(true)
- }
-
- for idx := range parts {
- if !partsProcessed[idx].Load() {
- eg.Go(func() error {
- err := e.writePart(egCtx, parts[idx], idx, nodes, visited)
- if err == nil {
- partsProcessed[idx].Store(true)
- t.SubmitSuccess()
- }
- return err
- })
- }
- }
- err = eg.Wait()
- }
- if err != nil {
- return errIncompletePut{
- singleErr: err,
- }
- }
- for idx := range partsProcessed {
- if !partsProcessed[idx].Load() {
- return errIncompletePut{
- singleErr: errFailedToSaveAllECParts,
- }
- }
- }
- return nil
-}
-
-func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx int, nodes []placement.Node, visited []atomic.Bool) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- // try to save to node for current part index
- node := nodes[partIdx%len(nodes)]
- err := e.putECPartToNode(ctx, obj, node)
- if err == nil {
- return nil
- } else if clientSDK.IsErrObjectAlreadyRemoved(err) {
- return err
- }
- e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
- zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
- zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
-
- partVisited := make([]bool, len(nodes))
- partVisited[partIdx%len(nodes)] = true
-
- // try to save to any node not visited by any of other parts
- for i := 1; i < len(nodes); i++ {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- idx := (partIdx + i) % len(nodes)
- if !visited[idx].CompareAndSwap(false, true) {
- continue
- }
- node = nodes[idx]
- err := e.putECPartToNode(ctx, obj, node)
- if err == nil {
- return nil
- }
- e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
- zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
- zap.String("node", hex.EncodeToString(node.PublicKey())),
- zap.Error(err))
-
- partVisited[idx] = true
- }
-
- // try to save to any node not visited by current part
- for i := range nodes {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- if partVisited[i] {
- continue
- }
- node = nodes[i]
- err := e.putECPartToNode(ctx, obj, node)
- if err == nil {
- return nil
- }
- e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
- zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
- zap.String("node", hex.EncodeToString(node.PublicKey())),
- zap.Error(err))
- }
-
- return fmt.Errorf("failed to save EC chunk %s to any node", object.AddressOf(obj))
-}
-
-func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
- if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) {
- return e.writePartLocal(ctx, obj)
- }
- return e.writePartRemote(ctx, obj, node)
-}
-
-func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
- localTarget := LocalTarget{
- Storage: e.Config.LocalStore,
- Container: e.Container,
- }
- return localTarget.WriteObject(ctx, obj, e.ObjectMeta)
-}
-
-func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
- var clientNodeInfo client.NodeInfo
- client.NodeInfoFromNetmapElement(&clientNodeInfo, node)
-
- remoteTaget := remoteWriter{
- privateKey: e.remoteRequestSignKey,
- clientConstructor: e.Config.ClientConstructor,
- commonPrm: e.CommonPrm,
- nodeInfo: clientNodeInfo,
- }
-
- return remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
-}
diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go
deleted file mode 100644
index d5eeddf21..000000000
--- a/pkg/services/object/common/writer/ec_test.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package writer
-
-import (
- "bytes"
- "context"
- "crypto/rand"
- "crypto/sha256"
- "errors"
- "fmt"
- "slices"
- "strconv"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-type testPlacementBuilder struct {
- vectors [][]netmap.NodeInfo
-}
-
-func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
- [][]netmap.NodeInfo, error,
-) {
- arr := slices.Clone(p.vectors[0])
- return [][]netmap.NodeInfo{arr}, nil
-}
-
-type nmKeys struct{}
-
-func (nmKeys) IsLocalKey(_ []byte) bool {
- return false
-}
-
-type clientConstructor struct {
- vectors [][]netmap.NodeInfo
-}
-
-func (c clientConstructor) Get(info client.NodeInfo) (client.MultiAddressClient, error) {
- if bytes.Equal(info.PublicKey(), c.vectors[0][0].PublicKey()) ||
- bytes.Equal(info.PublicKey(), c.vectors[0][1].PublicKey()) {
- return multiAddressClient{err: errors.New("node unavailable")}, nil
- }
- return multiAddressClient{}, nil
-}
-
-type multiAddressClient struct {
- client.MultiAddressClient
- err error
-}
-
-func (c multiAddressClient) ObjectPutSingle(_ context.Context, _ apiclient.PrmObjectPutSingle) (*apiclient.ResObjectPutSingle, error) {
- if c.err != nil {
- return nil, c.err
- }
- return &apiclient.ResObjectPutSingle{}, nil
-}
-
-func (c multiAddressClient) ReportError(error) {
-}
-
-func (multiAddressClient) RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error {
- return nil
-}
-
-func TestECWriter(t *testing.T) {
- // Create container with policy EC 1.1
- cnr := container.Container{}
- p1 := netmap.PlacementPolicy{}
- p1.SetContainerBackupFactor(1)
- x1 := netmap.ReplicaDescriptor{}
- x1.SetECDataCount(1)
- x1.SetECParityCount(1)
- p1.AddReplicas(x1)
- cnr.SetPlacementPolicy(p1)
- cnr.SetAttribute("cnr", "cnr1")
-
- cid := cidtest.ID()
-
- // Create 4 nodes, 2 nodes for chunks,
- // 2 nodes for the case when the first two will fail.
- ns, _ := testNodeMatrix(t, []int{4})
-
- data := make([]byte, 100)
- _, _ = rand.Read(data)
- ver := version.Current()
-
- var csum checksum.Checksum
- csum.SetSHA256(sha256.Sum256(data))
-
- var csumTZ checksum.Checksum
- csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
-
- obj := objectSDK.New()
- obj.SetID(oidtest.ID())
- obj.SetOwnerID(usertest.ID())
- obj.SetContainerID(cid)
- obj.SetVersion(&ver)
- obj.SetPayload(data)
- obj.SetPayloadSize(uint64(len(data)))
- obj.SetPayloadChecksum(csum)
- obj.SetPayloadHomomorphicHash(csumTZ)
-
- // Builder return nodes without sort by hrw
- builder := &testPlacementBuilder{
- vectors: ns,
- }
-
- ownerKey, err := keys.NewPrivateKey()
- require.NoError(t, err)
- nodeKey, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- log, err := logger.NewLogger(logger.Prm{})
- require.NoError(t, err)
-
- var n nmKeys
- ecw := ECWriter{
- Config: &Config{
- NetmapKeys: n,
- Logger: log,
- ClientConstructor: clientConstructor{vectors: ns},
- KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil),
- },
- PlacementOpts: append(
- []placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)},
- placement.WithCopyNumbers(nil)), // copies number ignored for EC
- Container: cnr,
- Key: &ownerKey.PrivateKey,
- Relay: nil,
- ObjectMetaValid: true,
- }
-
- err = ecw.WriteObject(context.Background(), obj)
- require.NoError(t, err)
-}
-
-func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
- mNodes := make([][]netmap.NodeInfo, len(dim))
- mAddr := make([][]string, len(dim))
-
- for i := range dim {
- ns := make([]netmap.NodeInfo, dim[i])
- as := make([]string, dim[i])
-
- for j := range dim[i] {
- a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
- strconv.Itoa(i),
- strconv.Itoa(60000+j),
- )
-
- var ni netmap.NodeInfo
- ni.SetNetworkEndpoints(a)
- ni.SetPublicKey([]byte(a))
-
- var na network.AddressGroup
-
- err := na.FromIterator(netmapcore.Node(ni))
- require.NoError(t, err)
-
- as[j] = network.StringifyGroup(na)
-
- ns[j] = ni
- }
-
- mNodes[i] = ns
- mAddr[i] = as
- }
-
- return mNodes, mAddr
-}
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
deleted file mode 100644
index d3d2b41b4..000000000
--- a/pkg/services/object/common/writer/writer.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package writer
-
-import (
- "context"
- "crypto/ecdsa"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
- objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
-)
-
-type MaxSizeSource interface {
- // MaxObjectSize returns maximum payload size
- // of physically stored object in system.
- //
- // Must return 0 if value can not be obtained.
- MaxObjectSize(context.Context) uint64
-}
-
-type ClientConstructor interface {
- Get(client.NodeInfo) (client.MultiAddressClient, error)
-}
-
-type InnerRing interface {
- InnerRingKeys(ctx context.Context) ([][]byte, error)
-}
-
-type FormatValidatorConfig interface {
- VerifySessionTokenIssuer() bool
-}
-
-// Config represents a set of static parameters that are established during
-// the initialization phase of all services.
-type Config struct {
- KeyStorage *objutil.KeyStorage
-
- MaxSizeSrc MaxSizeSource
-
- LocalStore ObjectStorage
-
- ContainerSource container.Source
-
- NetmapSource netmap.Source
-
- NetmapKeys netmap.AnnouncedKeys
-
- FormatValidator *object.FormatValidator
-
- NetworkState netmap.State
-
- ClientConstructor ClientConstructor
-
- Logger *logger.Logger
-
- VerifySessionTokenIssuer bool
-}
-
-type Option func(*Config)
-
-func WithLogger(l *logger.Logger) Option {
- return func(c *Config) {
- c.Logger = l
- }
-}
-
-func WithVerifySessionTokenIssuer(v bool) Option {
- return func(c *Config) {
- c.VerifySessionTokenIssuer = v
- }
-}
-
-type Params struct {
- Config *Config
-
- Common *objutil.CommonPrm
-
- Header *objectSDK.Object
-
- Container containerSDK.Container
-
- TraverseOpts []placement.Option
-
- Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
-
- SignRequestPrivateKey *ecdsa.PrivateKey
-}
-
-func New(prm *Params) transformer.ObjectWriter {
- if container.IsECContainer(prm.Container) && object.IsECSupported(prm.Header) {
- return newECWriter(prm)
- }
- return newDefaultObjectWriter(prm, false)
-}
-
-func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.ObjectWriter {
- var relay func(context.Context, NodeDescriptor) error
- if prm.Relay != nil {
- relay = func(ctx context.Context, node NodeDescriptor) error {
- var info client.NodeInfo
-
- client.NodeInfoFromNetmapElement(&info, node.Info)
-
- c, err := prm.Config.ClientConstructor.Get(info)
- if err != nil {
- return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
- }
-
- return prm.Relay(ctx, info, c)
- }
- }
-
- var resetSuccessAfterOnBroadcast bool
- traverseOpts := prm.TraverseOpts
- if forECPlacement && !prm.Common.LocalOnly() {
- // save non-regular and linking object to EC container.
- // EC 2.1 -> REP 2, EC 2.2 -> REP 3 etc.
- traverseOpts = append(traverseOpts, placement.SuccessAfter(uint32(policy.ECParityCount(prm.Container.PlacementPolicy())+1)))
- resetSuccessAfterOnBroadcast = true
- }
-
- return &distributedWriter{
- cfg: prm.Config,
- placementOpts: traverseOpts,
- resetSuccessAfterOnBroadcast: resetSuccessAfterOnBroadcast,
- nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget {
- if node.Local {
- return LocalTarget{
- Storage: prm.Config.LocalStore,
- Container: prm.Container,
- }
- }
-
- rt := &remoteWriter{
- privateKey: prm.SignRequestPrivateKey,
- commonPrm: prm.Common,
- clientConstructor: prm.Config.ClientConstructor,
- }
-
- client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.Info)
-
- return rt
- },
- relay: relay,
- }
-}
-
-func newECWriter(prm *Params) transformer.ObjectWriter {
- return &objectWriterDispatcher{
- ecWriter: &ECWriter{
- Config: prm.Config,
- PlacementOpts: append(prm.TraverseOpts, placement.WithCopyNumbers(nil)), // copies number ignored for EC
- Container: prm.Container,
- Key: prm.SignRequestPrivateKey,
- CommonPrm: prm.Common,
- Relay: prm.Relay,
- },
- repWriter: newDefaultObjectWriter(prm, true),
- }
-}
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index 57e33fde7..88454625d 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -33,13 +33,13 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) error {
- exec.log.Debug(ctx, logs.ServingRequest)
+ exec.log.Debug(logs.ServingRequest)
if err := exec.executeLocal(ctx); err != nil {
- exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
+ exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
return err
}
- exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
return nil
}
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index a99ba3586..aac8c8860 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -2,15 +2,13 @@ package deletesvc
import (
"context"
- "errors"
"fmt"
- "slices"
"strconv"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -18,8 +16,6 @@ import (
"go.uber.org/zap"
)
-var errDeleteECChunk = errors.New("invalid operation: delete EC object chunk")
-
type execCtx struct {
svc *Service
@@ -35,16 +31,16 @@ type execCtx struct {
}
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = l.With(
+ exec.log = &logger.Logger{Logger: l.With(
zap.String("request", "DELETE"),
zap.Stringer("address", exec.address()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )
+ )}
}
-func (exec *execCtx) isLocal() bool {
+func (exec execCtx) isLocal() bool {
return exec.prm.common.LocalOnly()
}
@@ -68,36 +64,10 @@ func (exec *execCtx) newAddress(id oid.ID) oid.Address {
return a
}
-func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
- obj, err := exec.svc.header.head(ctx, exec)
-
- var errSplitInfo *objectSDK.SplitInfoError
- var errECInfo *objectSDK.ECInfoError
-
- switch {
- case err == nil:
- if ech := obj.ECHeader(); ech != nil {
- return errDeleteECChunk
- }
- return nil
- case errors.As(err, &errSplitInfo):
- exec.splitInfo = errSplitInfo.SplitInfo()
- exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
-
- exec.log.Debug(ctx, logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
-
- if err := exec.collectMembers(ctx); err != nil {
- return err
- }
-
- exec.log.Debug(ctx, logs.DeleteMembersSuccessfullyCollected)
- return nil
- case errors.As(err, &errECInfo):
- exec.log.Debug(ctx, logs.DeleteECObjectReceived)
- return nil
- }
-
- if !apiclient.IsErrObjectAlreadyRemoved(err) {
+func (exec *execCtx) formSplitInfo(ctx context.Context) error {
+ var err error
+ exec.splitInfo, err = exec.svc.header.splitInfo(ctx, exec)
+ if err != nil && !apiclient.IsErrObjectAlreadyRemoved(err) {
// IsErrObjectAlreadyRemoved check is required because splitInfo
// implicitly performs Head request that may return ObjectAlreadyRemoved
// status that is not specified for Delete.
@@ -109,7 +79,7 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
func (exec *execCtx) collectMembers(ctx context.Context) error {
if exec.splitInfo == nil {
- exec.log.Debug(ctx, logs.DeleteNoSplitInfoObjectIsPHY)
+ exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY)
return nil
}
@@ -132,7 +102,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) error {
func (exec *execCtx) collectChain(ctx context.Context) error {
var chain []oid.ID
- exec.log.Debug(ctx, logs.DeleteAssemblingChain)
+ exec.log.Debug(logs.DeleteAssemblingChain)
for prev, withPrev := exec.splitInfo.LastPart(); withPrev; {
chain = append(chain, prev)
@@ -153,7 +123,7 @@ func (exec *execCtx) collectChain(ctx context.Context) error {
}
func (exec *execCtx) collectChildren(ctx context.Context) error {
- exec.log.Debug(ctx, logs.DeleteCollectingChildren)
+ exec.log.Debug(logs.DeleteCollectingChildren)
children, err := exec.svc.header.children(ctx, exec)
if err != nil {
@@ -166,7 +136,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) error {
}
func (exec *execCtx) supplementBySplitID(ctx context.Context) error {
- exec.log.Debug(ctx, logs.DeleteSupplementBySplitID)
+ exec.log.Debug(logs.DeleteSupplementBySplitID)
chain, err := exec.svc.searcher.splitMembers(ctx, exec)
if err != nil {
@@ -183,7 +153,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
for i := range members {
for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body
if members[i].Equals(incoming[j]) {
- incoming = slices.Delete(incoming, j, j+1)
+ incoming = append(incoming[:j], incoming[j+1:]...)
j--
}
}
@@ -206,11 +176,11 @@ func (exec *execCtx) initTombstoneObject() error {
tokenSession := exec.commonParameters().SessionToken()
if tokenSession != nil {
issuer := tokenSession.Issuer()
- exec.tombstoneObj.SetOwnerID(issuer)
+ exec.tombstoneObj.SetOwnerID(&issuer)
} else {
// make local node a tombstone object owner
localUser := exec.svc.netInfo.LocalNodeID()
- exec.tombstoneObj.SetOwnerID(localUser)
+ exec.tombstoneObj.SetOwnerID(&localUser)
}
var a objectSDK.Attribute
diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go
index 01b2d9b3f..55ce4408d 100644
--- a/pkg/services/object/delete/local.go
+++ b/pkg/services/object/delete/local.go
@@ -10,13 +10,13 @@ import (
)
func (exec *execCtx) executeLocal(ctx context.Context) error {
- exec.log.Debug(ctx, logs.DeleteFormingTombstoneStructure)
+ exec.log.Debug(logs.DeleteFormingTombstoneStructure)
if err := exec.formTombstone(ctx); err != nil {
return err
}
- exec.log.Debug(ctx, logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
+ exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
return exec.saveTombstone(ctx)
}
@@ -33,11 +33,21 @@ func (exec *execCtx) formTombstone(ctx context.Context) error {
)
exec.addMembers([]oid.ID{exec.address().Object()})
- exec.log.Debug(ctx, logs.DeleteFormingSplitInfo)
+ exec.log.Debug(logs.DeleteFormingSplitInfo)
- if err := exec.formExtendedInfo(ctx); err != nil {
- return fmt.Errorf("form extended info: %w", err)
+ if err := exec.formSplitInfo(ctx); err != nil {
+ return fmt.Errorf("form split info: %w", err)
}
+ exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
+
+ exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
+
+ if err := exec.collectMembers(ctx); err != nil {
+ return err
+ }
+
+ exec.log.Debug(logs.DeleteMembersSuccessfullyCollected)
+
return exec.initTombstoneObject()
}
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index 1c4d7d585..b74a4c7ba 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -27,11 +27,11 @@ type Option func(*cfg)
type NetworkInfo interface {
netmap.State
- // TombstoneLifetime must return the lifespan of the tombstones
+ // Must return the lifespan of the tombstones
// in the FrostFS epochs.
TombstoneLifetime() (uint64, error)
- // LocalNodeID returns user ID of the local storage node. Result must not be nil.
+ // Returns user ID of the local storage node. Result must not be nil.
// New tombstone objects will have the result as an owner ID if removal is executed w/o a session.
LocalNodeID() user.ID
}
@@ -41,7 +41,7 @@ type cfg struct {
header interface {
// must return (nil, nil) for PHY objects
- head(context.Context, *execCtx) (*objectSDK.Object, error)
+ splitInfo(context.Context, *execCtx) (*objectSDK.SplitInfo, error)
children(context.Context, *execCtx) ([]oid.ID, error)
@@ -69,10 +69,9 @@ func New(gs *getsvc.Service,
ps *putsvc.Service,
ni NetworkInfo,
ks *util.KeyStorage,
- opts ...Option,
-) *Service {
+ opts ...Option) *Service {
c := &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
header: &headSvcWrapper{s: gs},
searcher: &searchSvcWrapper{s: ss},
placer: &putSvcWrapper{s: ps},
@@ -92,6 +91,6 @@ func New(gs *getsvc.Service,
// WithLogger returns option to specify Delete service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "objectSDK.Delete service"))}
}
}
diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go
index a78fd7747..439abca2b 100644
--- a/pkg/services/object/delete/util.go
+++ b/pkg/services/object/delete/util.go
@@ -2,6 +2,7 @@ package deletesvc
import (
"context"
+ "errors"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
@@ -30,12 +31,7 @@ func (w *headSvcWrapper) headAddress(ctx context.Context, exec *execCtx, addr oi
wr := getsvc.NewSimpleObjectWriter()
p := getsvc.HeadPrm{}
-
- if cp := exec.commonParameters(); cp != nil {
- commonParameters := *cp
- p.SetCommonParameters(&commonParameters)
- }
-
+ p.SetCommonParameters(exec.commonParameters())
p.SetHeaderWriter(wr)
p.WithRawFlag(true)
p.WithAddress(addr)
@@ -48,8 +44,19 @@ func (w *headSvcWrapper) headAddress(ctx context.Context, exec *execCtx, addr oi
return wr.Object(), nil
}
-func (w *headSvcWrapper) head(ctx context.Context, exec *execCtx) (*objectSDK.Object, error) {
- return w.headAddress(ctx, exec, exec.address())
+func (w *headSvcWrapper) splitInfo(ctx context.Context, exec *execCtx) (*objectSDK.SplitInfo, error) {
+ _, err := w.headAddress(ctx, exec, exec.address())
+
+ var errSplitInfo *objectSDK.SplitInfoError
+
+ switch {
+ case err == nil:
+ return nil, nil
+ case errors.As(err, &errSplitInfo):
+ return errSplitInfo.SplitInfo(), nil
+ default:
+ return nil, err
+ }
}
func (w *headSvcWrapper) children(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
diff --git a/pkg/services/object/delete/v2/service.go b/pkg/services/object/delete/v2/service.go
index 7146f0361..10dcd0e87 100644
--- a/pkg/services/object/delete/v2/service.go
+++ b/pkg/services/object/delete/v2/service.go
@@ -3,8 +3,8 @@ package deletesvc
import (
"context"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Delete operation of Object service v2.
diff --git a/pkg/services/object/delete/v2/util.go b/pkg/services/object/delete/v2/util.go
index c57d4562a..d0db1f543 100644
--- a/pkg/services/object/delete/v2/util.go
+++ b/pkg/services/object/delete/v2/util.go
@@ -4,10 +4,10 @@ import (
"errors"
"fmt"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index e80132489..6a8c5c818 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -12,8 +12,8 @@ import (
)
func (r *request) assemble(ctx context.Context) {
- if !r.canAssembleComplexObject() {
- r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
+ if !r.canAssemble() {
+ r.log.Debug(logs.GetCanNotAssembleTheObject)
return
}
@@ -35,24 +35,26 @@ func (r *request) assemble(ctx context.Context) {
// `execCtx` so it should be disabled there.
r.disableForwarding()
- r.log.Debug(ctx, logs.GetTryingToAssembleTheObject)
+ r.log.Debug(logs.GetTryingToAssembleTheObject)
- r.prm.common = r.prm.common.WithLocalOnly(false)
- assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly())
+ assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r)
- r.log.Debug(ctx, logs.GetAssemblingSplittedObject,
+ r.log.Debug(logs.GetAssemblingSplittedObject,
+ zap.Stringer("address", r.address()),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
- defer r.log.Debug(ctx, logs.GetAssemblingSplittedObjectCompleted,
+ defer r.log.Debug(logs.GetAssemblingSplittedObjectCompleted,
+ zap.Stringer("address", r.address()),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
obj, err := assembler.Assemble(ctx, r.prm.objWriter)
if err != nil {
- r.log.Warn(ctx, logs.GetFailedToAssembleSplittedObject,
+ r.log.Warn(logs.GetFailedToAssembleSplittedObject,
zap.Error(err),
+ zap.Stringer("address", r.address()),
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
zap.Uint64("range_length", r.ctxRange().GetLength()),
)
@@ -117,6 +119,7 @@ func (r *request) GetObjectAndWritePayload(ctx context.Context, id oid.ID, rng *
}
p := r.prm
+ p.common = p.common.WithLocalOnly(false)
p.objWriter = w
p.rng = rng
@@ -136,15 +139,13 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque
remoteStorageConstructor: r.remoteStorageConstructor,
epochSource: r.epochSource,
localStorage: r.localStorage,
- containerSource: r.containerSource,
prm: prm,
infoSplit: objectSDK.NewSplitInfo(),
- infoEC: newECInfo(),
log: r.log,
}
detachedExecutor.execute(ctx)
- return detachedExecutor.err
+ return detachedExecutor.statusError.err
}
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
deleted file mode 100644
index 59dd7fd93..000000000
--- a/pkg/services/object/get/assembleec.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package getsvc
-
-import (
- "context"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "go.uber.org/zap"
-)
-
-func (r *request) assembleEC(ctx context.Context) {
- if r.isRaw() {
- r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
- return
- }
-
- // Any access tokens are not expected to be used in the assembly process:
- // - there is no requirement to specify child objects in session/bearer
- // token for `GET`/`GETRANGE`/`RANGEHASH` requests in the API protocol,
- // and, therefore, their missing in the original request should not be
- // considered as error; on the other hand, without session for every child
- // object, it is impossible to attach bearer token in the new generated
- // requests correctly because the token has not been issued for that node's
- // key;
- // - the assembly process is expected to be handled on a container node
- // only since the requests forwarding mechanism presentation; such the
- // node should have enough rights for getting any child object by design.
- r.prm.common.ForgetTokens()
-
- // Do not use forwarding during assembly stage.
- // Request forwarding closure inherited in produced
- // `execCtx` so it should be disabled there.
- r.disableForwarding()
-
- r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject)
-
- // initialize epoch number
- ok := r.initEpoch(ctx)
- if !ok {
- return
- }
-
- r.prm.common = r.prm.common.WithLocalOnly(false)
- assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
-
- r.log.Debug(ctx, logs.GetAssemblingECObject,
- zap.Uint64("range_offset", r.ctxRange().GetOffset()),
- zap.Uint64("range_length", r.ctxRange().GetLength()),
- )
- defer r.log.Debug(ctx, logs.GetAssemblingECObjectCompleted,
- zap.Uint64("range_offset", r.ctxRange().GetOffset()),
- zap.Uint64("range_length", r.ctxRange().GetLength()),
- )
-
- obj, err := assembler.Assemble(ctx, r.prm.objWriter)
- if err != nil && !errors.As(err, new(*objectSDK.ECInfoError)) {
- r.log.Warn(ctx, logs.GetFailedToAssembleECObject,
- zap.Error(err),
- zap.Uint64("range_offset", r.ctxRange().GetOffset()),
- zap.Uint64("range_length", r.ctxRange().GetLength()),
- )
- }
-
- var errRemoved *apistatus.ObjectAlreadyRemoved
- var errOutOfRange *apistatus.ObjectOutOfRange
- var errECInfo *objectSDK.ECInfoError
-
- switch {
- default:
- r.status = statusUndefined
- r.err = err
- case err == nil:
- r.status = statusOK
- r.err = nil
- r.collectedObject = obj
- case errors.As(err, &errRemoved):
- r.status = statusINHUMED
- r.err = errRemoved
- case errors.As(err, &errOutOfRange):
- r.status = statusOutOfRange
- r.err = errOutOfRange
- case errors.As(err, &errECInfo):
- r.status = statusEC
- r.err = err
- }
-}
diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go
index b24c9417b..61de0ee99 100644
--- a/pkg/services/object/get/assembler.go
+++ b/pkg/services/object/get/assembler.go
@@ -2,7 +2,6 @@ package getsvc
import (
"context"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -20,7 +19,6 @@ type assembler struct {
splitInfo *objectSDK.SplitInfo
rng *objectSDK.Range
objGetter objectGetter
- head bool
currentOffset uint64
@@ -31,24 +29,18 @@ func newAssembler(
addr oid.Address,
splitInfo *objectSDK.SplitInfo,
rng *objectSDK.Range,
- objGetter objectGetter,
- head bool,
-) *assembler {
+ objGetter objectGetter) *assembler {
return &assembler{
addr: addr,
rng: rng,
splitInfo: splitInfo,
objGetter: objGetter,
- head: head,
}
}
// Assemble assembles splitted large object and writes it's content to ObjectWriter.
// It returns parent object.
func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
- if a.head {
- return a.assembleHeader(ctx, writer)
- }
sourceObjectID, ok := a.getLastPartOrLinkObjectID()
if !ok {
return nil, objectSDK.NewSplitInfoError(a.splitInfo)
@@ -60,23 +52,15 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS
if previousID == nil && len(childrenIDs) == 0 {
return nil, objectSDK.NewSplitInfoError(a.splitInfo)
}
-
if len(childrenIDs) > 0 {
- if a.rng != nil {
- err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer)
- } else {
- err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer)
+ if err := a.assembleObjectByChildrenList(ctx, childrenIDs, writer); err != nil {
+ return nil, err
}
} else {
- if a.rng != nil {
- err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer)
- } else {
- err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer)
+ if err := a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer); err != nil {
+ return nil, err
}
}
- if err != nil {
- return nil, err
- }
return a.parentObject, nil
}
@@ -129,7 +113,7 @@ func (a *assembler) initializeFromSourceObjectID(ctx context.Context, id oid.ID)
}
to := uint64(0)
- if seekOff+seekLen >= a.currentOffset+from {
+ if seekOff+seekLen > a.currentOffset+from {
to = seekOff + seekLen - a.currentOffset
}
@@ -162,16 +146,26 @@ func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSD
}
func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ if a.rng == nil {
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ return err
+ }
+ return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, nil, true)
+ }
+
+ if err := a.assemblePayloadInReverse(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
return err
}
- return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true)
+ return writer.WriteChunk(ctx, a.parentObject.Payload())
}
func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
- if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
- return err
+ if a.rng == nil {
+ if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil {
+ return err
+ }
}
+
if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil {
return err
}
@@ -181,9 +175,16 @@ func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prev
return nil
}
-func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error {
+func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range, verifyIsChild bool) error {
+ withRng := len(partRanges) > 0 && a.rng != nil
+
for i := range partIDs {
- _, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer)
+ var r *objectSDK.Range
+ if withRng {
+ r = &partRanges[i]
+ }
+
+ _, err := a.getChildObject(ctx, partIDs[i], r, verifyIsChild, writer)
if err != nil {
return err
}
@@ -192,13 +193,22 @@ func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer Objec
}
func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
- chain, err := a.buildChain(ctx, prevID)
+ chain, rngs, err := a.buildChain(ctx, prevID)
if err != nil {
return err
}
- slices.Reverse(chain)
- return a.assemblePayloadByObjectIDs(ctx, writer, chain, false)
+ reverseRngs := len(rngs) > 0
+
+ for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 {
+ chain[left], chain[right] = chain[right], chain[left]
+
+ if reverseRngs {
+ rngs[left], rngs[right] = rngs[right], rngs[left]
+ }
+ }
+
+ return a.assemblePayloadByObjectIDs(ctx, writer, chain, rngs, false)
}
func (a *assembler) isChild(obj *objectSDK.Object) bool {
@@ -206,28 +216,63 @@ func (a *assembler) isChild(obj *objectSDK.Object) bool {
return parent == nil || equalAddresses(a.addr, object.AddressOf(parent))
}
-func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) {
+func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
var (
chain []oid.ID
+ rngs []objectSDK.Range
+ from = a.rng.GetOffset()
+ to = from + a.rng.GetLength()
hasPrev = true
)
// fill the chain end-to-start
for hasPrev {
- head, err := a.objGetter.HeadObject(ctx, prevID)
- if err != nil {
- return nil, err
- }
- if !a.isChild(head) {
- return nil, errParentAddressDiffers
+ // check that only for "range" requests,
+ // for `GET` it stops via the false `withPrev`
+ if a.rng != nil && a.currentOffset <= from {
+ break
}
- id, _ := head.ID()
- chain = append(chain, id)
+ head, err := a.objGetter.HeadObject(ctx, prevID)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !a.isChild(head) {
+ return nil, nil, errParentAddressDiffers
+ }
+
+ if a.rng != nil {
+ sz := head.PayloadSize()
+
+ a.currentOffset -= sz
+
+ if a.currentOffset < to {
+ off := uint64(0)
+ if from > a.currentOffset {
+ off = from - a.currentOffset
+ sz -= from - a.currentOffset
+ }
+
+ if to < a.currentOffset+off+sz {
+ sz = to - off - a.currentOffset
+ }
+
+ index := len(rngs)
+ rngs = append(rngs, objectSDK.Range{})
+ rngs[index].SetOffset(off)
+ rngs[index].SetLength(sz)
+
+ id, _ := head.ID()
+ chain = append(chain, id)
+ }
+ } else {
+ id, _ := head.ID()
+ chain = append(chain, id)
+ }
prevID, hasPrev = head.PreviousID()
}
- return chain, nil
+ return chain, rngs, nil
}
diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go
deleted file mode 100644
index ff213cb82..000000000
--- a/pkg/services/object/get/assembler_head.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package getsvc
-
-import (
- "context"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
- var sourceObjectIDs []oid.ID
- sourceObjectID, ok := a.splitInfo.Link()
- if ok {
- sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
- }
- sourceObjectID, ok = a.splitInfo.LastPart()
- if ok {
- sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
- }
- if len(sourceObjectIDs) == 0 {
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
- }
- for _, sourceObjectID = range sourceObjectIDs {
- obj, err := a.getParent(ctx, sourceObjectID, writer)
- if err == nil {
- return obj, nil
- }
- }
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
-}
-
-func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
- obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
- if err != nil {
- return nil, err
- }
- parent := obj.Parent()
- if parent == nil {
- return nil, objectSDK.NewSplitInfoError(a.splitInfo)
- }
- if err := writer.WriteHeader(ctx, parent); err != nil {
- return nil, err
- }
- return obj, nil
-}
diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go
deleted file mode 100644
index 780693c40..000000000
--- a/pkg/services/object/get/assembler_range.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package getsvc
-
-import (
- "context"
- "slices"
-
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error {
- if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil {
- return err
- }
- return writer.WriteChunk(ctx, a.parentObject.Payload())
-}
-
-func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error {
- if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil {
- return err
- }
- if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part
- return err
- }
- return nil
-}
-
-func (a *assembler) assemblePayloadByObjectIDsRange(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error {
- for i := range partIDs {
- _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error {
- chain, rngs, err := a.buildChainRange(ctx, prevID)
- if err != nil {
- return err
- }
-
- slices.Reverse(chain)
- slices.Reverse(rngs)
- return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs)
-}
-
-func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) {
- var (
- chain []oid.ID
- rngs []objectSDK.Range
- from = a.rng.GetOffset()
- to = from + a.rng.GetLength()
-
- hasPrev = true
- )
-
- // fill the chain end-to-start
- for hasPrev && from < a.currentOffset {
- head, err := a.objGetter.HeadObject(ctx, prevID)
- if err != nil {
- return nil, nil, err
- }
- if !a.isChild(head) {
- return nil, nil, errParentAddressDiffers
- }
-
- nextOffset := a.currentOffset - head.PayloadSize()
- clampedFrom := max(from, nextOffset)
- clampedTo := min(to, a.currentOffset)
- if clampedFrom < clampedTo {
- index := len(rngs)
- rngs = append(rngs, objectSDK.Range{})
- rngs[index].SetOffset(clampedFrom - nextOffset)
- rngs[index].SetLength(clampedTo - clampedFrom)
-
- id, _ := head.ID()
- chain = append(chain, id)
- }
-
- a.currentOffset = nextOffset
- prevID, hasPrev = head.PreviousID()
- }
-
- return chain, rngs, nil
-}
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
deleted file mode 100644
index e0a7e1da6..000000000
--- a/pkg/services/object/get/assemblerec.go
+++ /dev/null
@@ -1,297 +0,0 @@
-package getsvc
-
-import (
- "context"
- "encoding/hex"
- "errors"
- "fmt"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
- "golang.org/x/sync/errgroup"
-)
-
-var errECPartsRetrieveCompleted = errors.New("EC parts receive completed")
-
-type ecRemoteStorage interface {
- getObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo) (*objectSDK.Object, error)
- headObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo, raw bool) (*objectSDK.Object, error)
-}
-
-type assemblerec struct {
- addr oid.Address
- ecInfo *ecInfo
- rng *objectSDK.Range
- remoteStorage ecRemoteStorage
- localStorage localStorage
- log *logger.Logger
- head bool
- traverserGenerator traverserGenerator
- epoch uint64
-}
-
-func newAssemblerEC(
- addr oid.Address,
- ecInfo *ecInfo,
- rng *objectSDK.Range,
- remoteStorage ecRemoteStorage,
- localStorage localStorage,
- log *logger.Logger,
- head bool,
- tg traverserGenerator,
- epoch uint64,
-) *assemblerec {
- return &assemblerec{
- addr: addr,
- rng: rng,
- ecInfo: ecInfo,
- remoteStorage: remoteStorage,
- localStorage: localStorage,
- log: log,
- head: head,
- traverserGenerator: tg,
- epoch: epoch,
- }
-}
-
-// Assemble assembles erasure-coded object and writes it's content to ObjectWriter.
-// It returns parent object.
-func (a *assemblerec) Assemble(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
- switch {
- case a.head:
- return a.reconstructHeader(ctx, writer)
- case a.rng != nil:
- return a.reconstructRange(ctx, writer)
- default:
- return a.reconstructObject(ctx, writer)
- }
-}
-
-func (a *assemblerec) getConstructor(cnr *container.Container) (*erasurecode.Constructor, error) {
- dataCount := policy.ECDataCount(cnr.Value.PlacementPolicy())
- parityCount := policy.ECParityCount(cnr.Value.PlacementPolicy())
- return erasurecode.NewConstructor(dataCount, parityCount)
-}
-
-func (a *assemblerec) reconstructHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
- obj, err := a.reconstructObjectFromParts(ctx, true)
- if err == nil {
- return obj, writer.WriteHeader(ctx, obj)
- }
- return nil, err
-}
-
-func (a *assemblerec) reconstructRange(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
- obj, err := a.reconstructObjectFromParts(ctx, false)
- if err != nil {
- return nil, err
- }
-
- from := a.rng.GetOffset()
- to := from + a.rng.GetLength()
- if pLen := uint64(len(obj.Payload())); to < from || pLen < from || pLen < to {
- return nil, &apistatus.ObjectOutOfRange{}
- }
- err = writer.WriteChunk(ctx, obj.Payload()[from:to])
- if err != nil {
- return nil, err
- }
- return obj, err
-}
-
-func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
- obj, err := a.reconstructObjectFromParts(ctx, false)
- if err == nil {
- err = writer.WriteHeader(ctx, obj.CutPayload())
- if err == nil {
- err = writer.WriteChunk(ctx, obj.Payload())
- if err != nil {
- return nil, err
- }
- }
- }
- return obj, err
-}
-
-func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) {
- objID := a.addr.Object()
- trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch)
- if err != nil {
- return nil, err
- }
- c, err := a.getConstructor(cnr)
- if err != nil {
- return nil, err
- }
- parts := a.retrieveParts(ctx, trav, cnr)
- if headers {
- return c.ReconstructHeader(parts)
- }
- return c.Reconstruct(parts)
-}
-
-func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Traverser, cnr *container.Container) []*objectSDK.Object {
- dataCount := policy.ECDataCount(cnr.Value.PlacementPolicy())
- parityCount := policy.ECParityCount(cnr.Value.PlacementPolicy())
-
- remoteNodes := make([]placement.Node, 0)
- for {
- batch := trav.Next()
- if len(batch) == 0 {
- break
- }
- remoteNodes = append(remoteNodes, batch...)
- }
-
- parts, err := a.processECNodesRequests(ctx, remoteNodes, dataCount, parityCount)
- if err != nil {
- a.log.Debug(ctx, logs.GetUnableToGetAllPartsECObject, zap.Error(err))
- }
- return parts
-}
-
-func (a *assemblerec) processECNodesRequests(ctx context.Context, nodes []placement.Node, dataCount, parityCount int) ([]*objectSDK.Object, error) {
- foundChunks := make(map[uint32]*objectSDK.Object)
- var foundChunksGuard sync.Mutex
- eg, ctx := errgroup.WithContext(ctx)
- eg.SetLimit(dataCount)
-
- for _, ch := range a.ecInfo.localChunks {
- eg.Go(func() error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- object := a.tryGetChunkFromLocalStorage(ctx, ch)
- if object == nil {
- return nil
- }
- foundChunksGuard.Lock()
- foundChunks[ch.Index] = object
- count := len(foundChunks)
- foundChunksGuard.Unlock()
- if count >= dataCount {
- return errECPartsRetrieveCompleted
- }
- return nil
- })
- }
-
- for _, node := range nodes {
- var info client.NodeInfo
- client.NodeInfoFromNetmapElement(&info, node)
- eg.Go(func() error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- chunks := a.tryGetChunkListFromNode(ctx, info)
- for _, ch := range chunks {
- object := a.tryGetChunkFromRemoteStorage(ctx, info, ch)
- if object == nil {
- continue
- }
- foundChunksGuard.Lock()
- foundChunks[ch.Index] = object
- count := len(foundChunks)
- foundChunksGuard.Unlock()
- if count >= dataCount {
- return errECPartsRetrieveCompleted
- }
- }
- return nil
- })
- }
- err := eg.Wait()
- if err == nil || errors.Is(err, errECPartsRetrieveCompleted) {
- parts := make([]*objectSDK.Object, dataCount+parityCount)
- for idx, chunk := range foundChunks {
- parts[idx] = chunk
- }
- return parts, nil
- }
- return nil, err
-}
-
-func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch objectSDK.ECChunk) *objectSDK.Object {
- var objID oid.ID
- err := objID.ReadFromV2(ch.ID)
- if err != nil {
- a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
- return nil
- }
- var addr oid.Address
- addr.SetContainer(a.addr.Container())
- addr.SetObject(objID)
- var object *objectSDK.Object
- if a.head {
- object, err = a.localStorage.Head(ctx, addr, false)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
- }
- } else {
- object, err = a.localStorage.Get(ctx, addr)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
- }
- }
- return object
-}
-
-func (a *assemblerec) tryGetChunkListFromNode(ctx context.Context, node client.NodeInfo) []objectSDK.ECChunk {
- if chunks, found := a.ecInfo.remoteChunks[string(node.PublicKey())]; found {
- return chunks
- }
- var errECInfo *objectSDK.ECInfoError
- _, err := a.remoteStorage.headObjectFromNode(ctx, a.addr, node, true)
- if err == nil {
- a.log.Error(ctx, logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
- return nil
- }
- if !errors.As(err, &errECInfo) {
- a.log.Warn(ctx, logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
- return nil
- }
- result := make([]objectSDK.ECChunk, 0, len(errECInfo.ECInfo().Chunks))
- for _, ch := range errECInfo.ECInfo().Chunks {
- result = append(result, objectSDK.ECChunk(ch))
- }
- return result
-}
-
-func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node client.NodeInfo, ch objectSDK.ECChunk) *objectSDK.Object {
- var objID oid.ID
- err := objID.ReadFromV2(ch.ID)
- if err != nil {
- a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
- return nil
- }
- var addr oid.Address
- addr.SetContainer(a.addr.Container())
- addr.SetObject(objID)
- var object *objectSDK.Object
- if a.head {
- object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
- }
- } else {
- object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node)
- if err != nil && !errors.Is(err, context.Canceled) {
- a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
- }
- }
- return object
-}
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index dfb31133c..d22b14192 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -10,25 +10,32 @@ import (
func (r *request) executeOnContainer(ctx context.Context) {
if r.isLocal() {
- r.log.Debug(ctx, logs.GetReturnResultDirectly)
+ r.log.Debug(logs.GetReturnResultDirectly)
return
}
lookupDepth := r.netmapLookupDepth()
- r.log.Debug(ctx, logs.TryingToExecuteInContainer,
+ r.log.Debug(logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- ok := r.initEpoch(ctx)
+ ok := r.initEpoch()
if !ok {
return
}
- localStatus := r.status
+ for {
+ if r.processCurrentEpoch(ctx) {
+ break
+ }
+
+ // check the maximum depth has been reached
+ if lookupDepth == 0 {
+ break
+ }
- for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 {
lookupDepth--
// go to the previous epoch
@@ -36,12 +43,12 @@ func (r *request) executeOnContainer(ctx context.Context) {
}
}
-func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool {
- r.log.Debug(ctx, logs.ProcessEpoch,
+func (r *request) processCurrentEpoch(ctx context.Context) bool {
+ r.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", r.curProcEpoch),
)
- traverser, ok := r.generateTraverser(ctx, r.address())
+ traverser, ok := r.generateTraverser(r.address())
if !ok {
return true
}
@@ -49,16 +56,12 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- if localStatus == statusEC { // possible only for raw == true and local == false
- r.status = statusEC
- } else {
- r.status = statusUndefined
- }
+ r.status = statusUndefined
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- r.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
+ r.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
return false
}
@@ -66,7 +69,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
for i := range addrs {
select {
case <-ctx.Done():
- r.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
+ r.log.Debug(logs.InterruptPlacementIterationByContext,
zap.Error(ctx.Err()),
)
@@ -82,7 +85,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
client.NodeInfoFromNetmapElement(&info, addrs[i])
if r.processNode(ctx, info) {
- r.log.Debug(ctx, logs.GetCompletingTheOperation)
+ r.log.Debug(logs.GetCompletingTheOperation)
return true
}
}
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index 3a50308c2..e3037a70b 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -2,11 +2,9 @@ package getsvc
import (
"context"
- "errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
)
@@ -25,7 +23,6 @@ func (s *Service) GetRange(ctx context.Context, prm RangePrm) error {
rng: prm.rng,
})
}
-
func (s *Service) GetRangeHash(ctx context.Context, prm RangeHashPrm) (*RangeHashRes, error) {
hashes := make([][]byte, 0, len(prm.rngs))
@@ -75,63 +72,47 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error {
remoteStorageConstructor: s.remoteStorageConstructor,
epochSource: s.epochSource,
localStorage: s.localStorage,
- containerSource: s.containerSource,
prm: prm,
infoSplit: objectSDK.NewSplitInfo(),
- infoEC: newECInfo(),
- log: s.log,
}
exec.setLogger(s.log)
exec.execute(ctx)
- return exec.err
+ return exec.statusError.err
}
-func (r *request) execute(ctx context.Context) {
- r.log.Debug(ctx, logs.ServingRequest)
+func (exec *request) execute(ctx context.Context) {
+ exec.log.Debug(logs.ServingRequest)
// perform local operation
- r.executeLocal(ctx)
+ exec.executeLocal(ctx)
- r.analyzeStatus(ctx, true)
+ exec.analyzeStatus(ctx, true)
}
-func (r *request) analyzeStatus(ctx context.Context, execCnr bool) {
+func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
- switch r.status {
+ switch exec.status {
case statusOK:
- r.log.Debug(ctx, logs.OperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
case statusINHUMED:
- r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved)
+ exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved)
case statusVIRTUAL:
- r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual)
- r.assemble(ctx)
+ exec.log.Debug(logs.GetRequestedObjectIsVirtual)
+ exec.assemble(ctx)
case statusOutOfRange:
- r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds)
- case statusEC:
- r.log.Debug(ctx, logs.GetRequestedObjectIsEC)
- if r.isRaw() && execCnr {
- r.executeOnContainer(ctx)
- r.analyzeStatus(ctx, false)
- }
- r.assembleEC(ctx)
+ exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
default:
- r.log.Debug(ctx, logs.OperationFinishedWithError,
- zap.Error(r.err),
+ exec.log.Debug(logs.OperationFinishedWithError,
+ zap.Error(exec.err),
)
- var errAccessDenied *apistatus.ObjectAccessDenied
- if execCnr && errors.As(r.err, &errAccessDenied) {
- // Local get can't return access denied error, so this error was returned by
- // write to the output stream. So there is no need to try to find object on other nodes.
- return
- }
if execCnr {
- r.executeOnContainer(ctx)
- r.analyzeStatus(ctx, false)
+ exec.executeOnContainer(ctx)
+ exec.analyzeStatus(ctx, false)
}
}
}
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 3efc72065..3ac487265 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -1,7 +1,6 @@
package getsvc
import (
- "bytes"
"context"
"crypto/ecdsa"
"crypto/rand"
@@ -11,7 +10,6 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -27,9 +25,6 @@ import (
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
@@ -63,14 +58,10 @@ type testClient struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
+func (e testEpochReceiver) Epoch() (uint64, error) {
return uint64(e), nil
}
-func (e testEpochReceiver) CurrentEpoch() uint64 {
- return uint64(e)
-}
-
func newTestStorage() *testStorage {
return &testStorage{
inhumed: make(map[string]struct{}),
@@ -79,7 +70,7 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) {
+func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, error) {
opts := make([]placement.Option, 0, 4)
opts = append(opts,
placement.ForContainer(g.c),
@@ -91,13 +82,10 @@ func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.
opts = append(opts, placement.ForObject(*obj))
}
- t, err := placement.NewTraverser(context.Background(), opts...)
- return t, &containerCore.Container{
- Value: g.c,
- }, err
+ return placement.NewTraverser(opts...)
}
-func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
@@ -245,7 +233,8 @@ func (whe *writeHeaderError) Error() string {
return "write header error"
}
-type writeHeaderErrorObjectWriter struct{}
+type writeHeaderErrorObjectWriter struct {
+}
func (w *writeHeaderErrorObjectWriter) WriteHeader(_ context.Context, _ *objectSDK.Object) error {
return &writeHeaderError{}
@@ -261,7 +250,8 @@ func (whe *writePayloadError) Error() string {
return "write payload error"
}
-type writePayloadErrorObjectWriter struct{}
+type writePayloadErrorObjectWriter struct {
+}
func (w *writePayloadErrorObjectWriter) WriteHeader(_ context.Context, _ *objectSDK.Object) error {
return nil
@@ -271,7 +261,8 @@ func (w *writePayloadErrorObjectWriter) WriteChunk(_ context.Context, _ []byte)
return &writePayloadError{}
}
-type testKeyStorage struct{}
+type testKeyStorage struct {
+}
func (ks *testKeyStorage) GetKey(_ *util.SessionInfo) (*ecdsa.PrivateKey, error) {
return &ecdsa.PrivateKey{}, nil
@@ -282,7 +273,7 @@ func TestGetLocalOnly(t *testing.T) {
newSvc := func(storage *testStorage) *Service {
return &Service{
- log: test.NewLogger(t),
+ log: test.NewLogger(t, true),
localStorage: storage,
}
}
@@ -470,7 +461,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
ns := make([]netmap.NodeInfo, dim[i])
as := make([]string, dim[i])
- for j := range dim[i] {
+ for j := 0; j < dim[i]; j++ {
a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
strconv.Itoa(i),
strconv.Itoa(60000+j),
@@ -478,7 +469,6 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
var ni netmap.NodeInfo
ni.SetNetworkEndpoints(a)
- ni.SetPublicKey([]byte(a))
var na network.AddressGroup
@@ -508,7 +498,7 @@ func generateChain(ln int, cnr cid.ID) ([]*objectSDK.Object, []oid.ID, []byte) {
ids := make([]oid.ID, 0, ln)
payload := make([]byte, 0, ln*10)
- for i := range ln {
+ for i := 0; i < ln; i++ {
ids = append(ids, curID)
addr.SetObject(curID)
@@ -545,7 +535,7 @@ func TestGetRemoteSmall(t *testing.T) {
const curEpoch = 13
return &Service{
- log: test.NewLogger(t),
+ log: test.NewLogger(t, true),
localStorage: newTestStorage(),
traverserGenerator: &testTraverserGenerator{
c: cnr,
@@ -568,6 +558,21 @@ func TestGetRemoteSmall(t *testing.T) {
return p
}
+ newRngPrm := func(raw bool, w ChunkWriter, off, ln uint64) RangePrm {
+ p := RangePrm{}
+ p.SetChunkWriter(w)
+ p.WithRawFlag(raw)
+ p.common = new(util.CommonPrm).WithLocalOnly(false)
+
+ r := objectSDK.NewRange()
+ r.SetOffset(off)
+ r.SetLength(ln)
+
+ p.SetRange(r)
+
+ return p
+ }
+
newHeadPrm := func(raw bool, w ObjectWriter) HeadPrm {
p := HeadPrm{}
p.SetHeaderWriter(w)
@@ -730,7 +735,7 @@ func TestGetRemoteSmall(t *testing.T) {
t.Run("VIRTUAL", func(t *testing.T) {
testHeadVirtual := func(svc *Service, addr oid.Address, i *objectSDK.SplitInfo) {
- headPrm := newHeadPrm(true, nil)
+ headPrm := newHeadPrm(false, nil)
headPrm.WithAddress(addr)
errSplit := objectSDK.NewSplitInfoError(objectSDK.NewSplitInfo())
@@ -1284,6 +1289,7 @@ func TestGetRemoteSmall(t *testing.T) {
err := svc.GetRange(ctx, p)
require.ErrorAs(t, err, new(*apistatus.ObjectOutOfRange))
})
+
})
t.Run("right child", func(t *testing.T) {
@@ -1626,203 +1632,6 @@ func TestGetRemoteSmall(t *testing.T) {
})
}
-type testTarget struct {
- objects []*objectSDK.Object
-}
-
-func (tt *testTarget) WriteObject(_ context.Context, obj *objectSDK.Object) error {
- tt.objects = append(tt.objects, obj)
- return nil
-}
-
-func objectChain(t *testing.T, cnr cid.ID, singleSize, totalSize uint64) (oid.ID, []*objectSDK.Object, *objectSDK.Object, []byte) {
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- tt := new(testTarget)
- p := transformer.NewPayloadSizeLimiter(transformer.Params{
- Key: &pk.PrivateKey,
- NextTargetInit: func() transformer.ObjectWriter { return tt },
- NetworkState: testEpochReceiver(1),
- MaxSize: singleSize,
- })
-
- payload := make([]byte, totalSize)
- _, err = rand.Read(payload)
- require.NoError(t, err)
-
- ver := version.Current()
- hdr := objectSDK.New()
- hdr.SetContainerID(cnr)
- hdr.SetType(objectSDK.TypeRegular)
- hdr.SetVersion(&ver)
-
- ctx := context.Background()
- require.NoError(t, p.WriteHeader(ctx, hdr))
-
- _, err = p.Write(ctx, payload)
- require.NoError(t, err)
-
- res, err := p.Close(ctx)
- require.NoError(t, err)
-
- if totalSize <= singleSize {
- // Small object, no linking.
- require.Len(t, tt.objects, 1)
- return res.SelfID, tt.objects, nil, payload
- }
-
- return *res.ParentID, tt.objects[:len(tt.objects)-1], tt.objects[len(tt.objects)-1], bytes.Clone(payload)
-}
-
-func newRngPrm(raw bool, w ChunkWriter, off, ln uint64) RangePrm {
- p := RangePrm{}
- p.SetChunkWriter(w)
- p.WithRawFlag(raw)
- p.common = new(util.CommonPrm)
-
- r := objectSDK.NewRange()
- r.SetOffset(off)
- r.SetLength(ln)
-
- p.SetRange(r)
- return p
-}
-
-func TestGetRange(t *testing.T) {
- var cnr container.Container
- cnr.SetPlacementPolicy(netmaptest.PlacementPolicy())
-
- var idCnr cid.ID
- container.CalculateID(&idCnr, cnr)
-
- ns, as := testNodeMatrix(t, []int{2})
-
- testGetRange := func(t *testing.T, svc *Service, addr oid.Address, from, to uint64, payload []byte) {
- w := NewSimpleObjectWriter()
- rngPrm := newRngPrm(false, w, from, to-from)
- rngPrm.WithAddress(addr)
-
- err := svc.GetRange(context.Background(), rngPrm)
- require.NoError(t, err)
- if from == to {
- require.Nil(t, w.Object().Payload())
- } else {
- require.Equal(t, payload[from:to], w.Object().Payload())
- }
- }
-
- newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
- const curEpoch = 13
-
- return &Service{
- log: test.NewLogger(t),
- localStorage: newTestStorage(),
- traverserGenerator: &testTraverserGenerator{
- c: cnr,
- b: map[uint64]placement.Builder{
- curEpoch: b,
- },
- },
- epochSource: testEpochReceiver(curEpoch),
- remoteStorageConstructor: c,
- keyStore: &testKeyStorage{},
- }
- }
-
- t.Run("small", func(t *testing.T) {
- const totalSize = 5
- _, objs, _, payload := objectChain(t, idCnr, totalSize, totalSize)
- require.Len(t, objs, 1)
- require.Len(t, payload, totalSize)
-
- obj := objs[0]
- addr := object.AddressOf(obj)
- builder := &testPlacementBuilder{vectors: map[string][][]netmap.NodeInfo{addr.EncodeToString(): ns}}
-
- c1 := newTestClient()
- c1.addResult(addr, obj, nil)
-
- svc := newSvc(builder, &testClientCache{
- clients: map[string]*testClient{
- as[0][0]: c1,
- as[0][1]: c1,
- },
- })
-
- for from := range totalSize - 1 {
- for to := from; to < totalSize; to++ {
- t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
- testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
- })
- }
- }
- })
- t.Run("big", func(t *testing.T) {
- const totalSize = 9
- id, objs, link, payload := objectChain(t, idCnr, 3, totalSize) // 3 parts
- require.Equal(t, totalSize, len(payload))
-
- builder := &testPlacementBuilder{vectors: map[string][][]netmap.NodeInfo{}}
- builder.vectors[idCnr.EncodeToString()+"/"+id.EncodeToString()] = ns
- builder.vectors[object.AddressOf(link).EncodeToString()] = ns
- for i := range objs {
- builder.vectors[object.AddressOf(objs[i]).EncodeToString()] = ns
- }
-
- var addr oid.Address
- addr.SetContainer(idCnr)
- addr.SetObject(id)
-
- const (
- linkingLast = "splitinfo=last"
- linkingChildren = "splitinfo=children"
- linkingBoth = "splitinfo=both"
- )
-
- lastID, _ := objs[len(objs)-1].ID()
- linkID, _ := link.ID()
-
- for _, kind := range []string{linkingLast, linkingChildren, linkingBoth} {
- t.Run(kind, func(t *testing.T) {
- c1 := newTestClient()
- for i := range objs {
- c1.addResult(object.AddressOf(objs[i]), objs[i], nil)
- }
-
- c1.addResult(object.AddressOf(link), link, nil)
-
- si := objectSDK.NewSplitInfo()
- switch kind {
- case linkingLast:
- si.SetLastPart(lastID)
- case linkingChildren:
- si.SetLink(linkID)
- case linkingBoth:
- si.SetLastPart(lastID)
- si.SetLink(linkID)
- }
- c1.addResult(addr, nil, objectSDK.NewSplitInfoError(si))
-
- svc := newSvc(builder, &testClientCache{
- clients: map[string]*testClient{
- as[0][0]: c1,
- as[0][1]: c1,
- },
- })
-
- for from := range totalSize - 1 {
- for to := from; to < totalSize; to++ {
- t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
- testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
- })
- }
- }
- })
- }
- })
-}
-
func TestGetFromPastEpoch(t *testing.T) {
ctx := context.Background()
@@ -1858,7 +1667,7 @@ func TestGetFromPastEpoch(t *testing.T) {
const curEpoch = 13
svc := &Service{
- log: test.NewLogger(t),
+ log: test.NewLogger(t, true),
localStorage: newTestStorage(),
epochSource: testEpochReceiver(curEpoch),
traverserGenerator: &testTraverserGenerator{
diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go
deleted file mode 100644
index 83ef54744..000000000
--- a/pkg/services/object/get/getrangeec_test.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package getsvc
-
-import (
- "context"
- "crypto/rand"
- "fmt"
- "testing"
-
- coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-type containerStorage struct {
- cnt *container.Container
-}
-
-func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) {
- coreCnt := coreContainer.Container{
- Value: *cs.cnt,
- }
- return &coreCnt, nil
-}
-
-func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) {
- return nil, nil
-}
-
-func TestGetRangeEC(t *testing.T) {
- var dataCount uint32 = 3
- var parityCount uint32 = 1
- cnr := container.Container{}
- p := netmap.PlacementPolicy{}
- p.SetContainerBackupFactor(1)
- x := netmap.ReplicaDescriptor{}
- x.SetECDataCount(dataCount)
- x.SetECParityCount(parityCount)
- p.AddReplicas(x)
- cnr.SetPlacementPolicy(p)
-
- var idCnr cid.ID
- container.CalculateID(&idCnr, cnr)
-
- ns, as := testNodeMatrix(t, []int{4})
-
- testGetRange := func(t *testing.T, svc *Service, addr oid.Address, from, to uint64, payload []byte) {
- w := NewSimpleObjectWriter()
- rngPrm := newRngPrm(false, w, from, to-from)
- rngPrm.WithAddress(addr)
-
- err := svc.GetRange(context.Background(), rngPrm)
- require.NoError(t, err)
- if from == to {
- require.Nil(t, w.Object().Payload())
- } else {
- require.Equal(t, payload[from:to], w.Object().Payload())
- }
- }
-
- newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
- const curEpoch = 13
-
- return &Service{
- log: test.NewLogger(t),
- localStorage: newTestStorage(),
- traverserGenerator: &testTraverserGenerator{
- c: cnr,
- b: map[uint64]placement.Builder{
- curEpoch: b,
- },
- },
- epochSource: testEpochReceiver(curEpoch),
- remoteStorageConstructor: c,
- keyStore: &testKeyStorage{},
- containerSource: &containerStorage{
- cnt: &cnr,
- },
- }
- }
- const totalSize = 5
- obj, parts := objectECChain(t, &idCnr, &cnr, totalSize, totalSize)
- require.Len(t, parts, int(dataCount+parityCount))
- require.Len(t, obj.Payload(), totalSize)
-
- addr := object.AddressOf(obj)
- builder := &testPlacementBuilder{
- vectors: map[string][][]netmap.NodeInfo{
- addr.EncodeToString(): ns,
- },
- }
-
- clients := map[string]*testClient{}
- for i, part := range parts {
- builder.vectors[object.AddressOf(part).EncodeToString()] = ns
-
- tc := newTestClient()
-
- ecInfo := objectSDK.NewECInfo()
-
- chunk := objectSDK.ECChunk{}
- chunk.Total = uint32(len(parts))
- chunk.Index = uint32(i)
- id, _ := part.ID()
- idv2 := refs.ObjectID{}
- id.WriteToV2(&idv2)
- chunk.ID = idv2
-
- ecInfo.AddChunk(chunk)
- errECInfo := objectSDK.NewECInfoError(ecInfo)
-
- tc.addResult(addr, nil, errECInfo)
- tc.addResult(object.AddressOf(part), part, nil)
-
- clients[as[0][i]] = tc
- }
-
- svc := newSvc(builder, &testClientCache{
- clients: clients,
- })
-
- for from := range totalSize - 1 {
- for to := from; to < totalSize; to++ {
- t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
- testGetRange(t, svc, addr, uint64(from), uint64(to), obj.Payload())
- })
- }
- }
-}
-
-func objectECChain(t *testing.T, cnrId *cid.ID, cnr *container.Container, singleSize, totalSize uint64) (*objectSDK.Object, []*objectSDK.Object) {
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- tt := new(testTarget)
- p := transformer.NewPayloadSizeLimiter(transformer.Params{
- Key: &pk.PrivateKey,
- NextTargetInit: func() transformer.ObjectWriter { return tt },
- NetworkState: testEpochReceiver(1),
- MaxSize: singleSize,
- })
-
- payload := make([]byte, totalSize)
- _, err = rand.Read(payload)
- require.NoError(t, err)
-
- ver := version.Current()
- hdr := objectSDK.New()
- hdr.SetContainerID(*cnrId)
- hdr.SetType(objectSDK.TypeRegular)
- hdr.SetVersion(&ver)
-
- ctx := context.Background()
- require.NoError(t, p.WriteHeader(ctx, hdr))
-
- _, err = p.Write(ctx, payload)
- require.NoError(t, err)
-
- _, err = p.Close(ctx)
- require.NoError(t, err)
-
- require.Len(t, tt.objects, 1)
-
- c, err := erasurecode.NewConstructor(policy.ECDataCount(cnr.PlacementPolicy()), policy.ECParityCount(cnr.PlacementPolicy()))
- require.NoError(t, err)
- parts, err := c.Split(tt.objects[0], &pk.PrivateKey)
- require.NoError(t, err)
-
- return tt.objects[0], parts
-}
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index cfabb082f..257465019 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -22,7 +22,6 @@ func (r *request) executeLocal(ctx context.Context) {
r.collectedObject, err = r.get(ctx)
var errSplitInfo *objectSDK.SplitInfoError
- var errECInfo *objectSDK.ECInfoError
var errRemoved *apistatus.ObjectAlreadyRemoved
var errOutOfRange *apistatus.ObjectOutOfRange
@@ -31,7 +30,7 @@ func (r *request) executeLocal(ctx context.Context) {
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetLocalGetFailed, zap.Error(err))
+ r.log.Debug(logs.GetLocalGetFailed, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
@@ -43,9 +42,6 @@ func (r *request) executeLocal(ctx context.Context) {
r.status = statusVIRTUAL
mergeSplitInfo(r.splitInfo(), errSplitInfo.SplitInfo())
r.err = objectSDK.NewSplitInfoError(r.infoSplit)
- case errors.As(err, &errECInfo):
- r.status = statusEC
- r.err = r.infoEC.addLocal(errECInfo.ECInfo())
case errors.As(err, &errOutOfRange):
r.status = statusOutOfRange
r.err = errOutOfRange
diff --git a/pkg/services/object/get/prm.go b/pkg/services/object/get/prm.go
index 94c07381c..cbdb7a3e2 100644
--- a/pkg/services/object/get/prm.go
+++ b/pkg/services/object/get/prm.go
@@ -124,10 +124,6 @@ func (p *commonPrm) SetRequestForwarder(f RequestForwarder) {
p.forwarder = f
}
-func (p *commonPrm) SetSignerKey(signerKey *ecdsa.PrivateKey) {
- p.signerKey = signerKey
-}
-
// WithAddress sets object address to be read.
func (p *commonPrm) WithAddress(addr oid.Address) {
p.addr = addr
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index 78ca5b5e3..4434f036a 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -2,7 +2,6 @@ package getsvc
import (
"context"
- "encoding/hex"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -10,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
@@ -18,9 +16,9 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
defer span.End()
- r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
+ r.log.Debug(logs.ProcessingNode)
- rs, ok := r.getRemoteStorage(ctx, info)
+ rs, ok := r.getRemoteStorage(info)
if !ok {
return true
}
@@ -28,24 +26,15 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
obj, err := r.getRemote(ctx, rs, info)
var errSplitInfo *objectSDK.SplitInfoError
- var errECInfo *objectSDK.ECInfoError
var errRemoved *apistatus.ObjectAlreadyRemoved
var errOutOfRange *apistatus.ObjectOutOfRange
- var errAccessDenied *apistatus.ObjectAccessDenied
switch {
default:
- r.log.Debug(ctx, logs.GetRemoteCallFailed, zap.Error(err))
- if r.status != statusEC {
- // for raw requests, continue to collect other parts
- r.status = statusUndefined
- if errors.As(err, &errAccessDenied) {
- r.err = err
- } else if r.err == nil || !errors.As(r.err, &errAccessDenied) {
- r.err = new(apistatus.ObjectNotFound)
- }
- }
- return false
+ r.status = statusUndefined
+ r.err = new(apistatus.ObjectNotFound)
+
+ r.log.Debug(logs.GetRemoteCallFailed, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
@@ -57,28 +46,19 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
r.collectedObject = obj
r.writeCollectedObject(ctx)
}
- return true
case errors.As(err, &errRemoved):
r.status = statusINHUMED
r.err = errRemoved
- return true
case errors.As(err, &errOutOfRange):
r.status = statusOutOfRange
r.err = errOutOfRange
- return true
case errors.As(err, &errSplitInfo):
r.status = statusVIRTUAL
mergeSplitInfo(r.splitInfo(), errSplitInfo.SplitInfo())
r.err = objectSDK.NewSplitInfoError(r.infoSplit)
- return true
- case errors.As(err, &errECInfo):
- r.status = statusEC
- r.err = r.infoEC.addRemote(string(info.PublicKey()), errECInfo.ECInfo())
- if r.isRaw() {
- return false // continue to collect all parts
- }
- return true
}
+
+ return r.status != statusUndefined
}
func (r *request) getRemote(ctx context.Context, rs remoteStorage, info client.NodeInfo) (*objectSDK.Object, error) {
@@ -114,50 +94,3 @@ func (r *request) getRemote(ctx context.Context, rs remoteStorage, info client.N
return rs.Get(ctx, r.address(), prm)
}
-
-func (r *request) getObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo) (*objectSDK.Object, error) {
- rs, err := r.remoteStorageConstructor.Get(info)
- if err != nil {
- return nil, err
- }
-
- key, err := r.key()
- if err != nil {
- return nil, err
- }
-
- prm := RemoteRequestParams{
- Epoch: r.curProcEpoch,
- TTL: 1,
- PrivateKey: key,
- SessionToken: r.prm.common.SessionToken(),
- BearerToken: r.prm.common.BearerToken(),
- XHeaders: r.prm.common.XHeaders(),
- }
-
- return rs.Get(ctx, addr, prm)
-}
-
-func (r *request) headObjectFromNode(ctx context.Context, addr oid.Address, info client.NodeInfo, raw bool) (*objectSDK.Object, error) {
- rs, err := r.remoteStorageConstructor.Get(info)
- if err != nil {
- return nil, err
- }
-
- key, err := r.key()
- if err != nil {
- return nil, err
- }
-
- prm := RemoteRequestParams{
- Epoch: r.curProcEpoch,
- TTL: 1,
- PrivateKey: key,
- SessionToken: r.prm.common.SessionToken(),
- BearerToken: r.prm.common.BearerToken(),
- XHeaders: r.prm.common.XHeaders(),
- IsRaw: raw,
- }
-
- return rs.Head(ctx, addr, prm)
-}
diff --git a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go
deleted file mode 100644
index 2c64244cf..000000000
--- a/pkg/services/object/get/remote_getter.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package getsvc
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-type RemoteGetPrm struct {
- Address oid.Address
- Node netmapSDK.NodeInfo
-}
-
-type RemoteGetter struct {
- s remoteStorageConstructor
- es epochSource
- ks keyStorage
-}
-
-func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Object, error) {
- var nodeInfo client.NodeInfo
- if err := client.NodeInfoFromRawNetmapElement(&nodeInfo, netmapCore.Node(prm.Node)); err != nil {
- return nil, err
- }
- rs, err := g.s.Get(nodeInfo)
- if err != nil {
- return nil, err
- }
- epoch, err := g.es.Epoch(ctx)
- if err != nil {
- return nil, err
- }
- key, err := g.ks.GetKey(nil)
- if err != nil {
- return nil, err
- }
- r := RemoteRequestParams{
- Epoch: epoch,
- TTL: 1,
- PrivateKey: key,
- }
- return rs.Get(ctx, prm.Address, r)
-}
-
-func NewRemoteGetter(cc clientConstructor, es epochSource, ks keyStorage) *RemoteGetter {
- return &RemoteGetter{
- s: &multiclientRemoteStorageConstructor{clientConstructor: cc},
- es: es,
- ks: ks,
- }
-}
diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go
index 268080486..b9223a637 100644
--- a/pkg/services/object/get/request.go
+++ b/pkg/services/object/get/request.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -23,8 +22,6 @@ type request struct {
infoSplit *objectSDK.SplitInfo
- infoEC *ecInfo
-
log *logger.Logger
collectedObject *objectSDK.Object
@@ -36,7 +33,6 @@ type request struct {
traverserGenerator traverserGenerator
remoteStorageConstructor remoteStorageConstructor
localStorage localStorage
- containerSource container.Source
}
func (r *request) setLogger(l *logger.Logger) {
@@ -47,14 +43,14 @@ func (r *request) setLogger(l *logger.Logger) {
req = "GET_RANGE"
}
- r.log = l.With(
+ r.log = &logger.Logger{Logger: l.With(
zap.String("request", req),
zap.Stringer("address", r.address()),
zap.Bool("raw", r.isRaw()),
zap.Bool("local", r.isLocal()),
zap.Bool("with session", r.prm.common.SessionToken() != nil),
zap.Bool("with bearer", r.prm.common.BearerToken() != nil),
- )
+ )}
}
func (r *request) isLocal() bool {
@@ -88,8 +84,8 @@ func (r *request) key() (*ecdsa.PrivateKey, error) {
return r.keyStore.GetKey(sessionInfo)
}
-func (r *request) canAssembleComplexObject() bool {
- return !r.isRaw()
+func (r *request) canAssemble() bool {
+ return !r.isRaw() && !r.headOnly()
}
func (r *request) splitInfo() *objectSDK.SplitInfo {
@@ -116,20 +112,20 @@ func (r *request) netmapLookupDepth() uint64 {
return r.prm.common.NetmapLookupDepth()
}
-func (r *request) initEpoch(ctx context.Context) bool {
+func (r *request) initEpoch() bool {
r.curProcEpoch = r.netmapEpoch()
if r.curProcEpoch > 0 {
return true
}
- e, err := r.epochSource.Epoch(ctx)
+ e, err := r.epochSource.Epoch()
switch {
default:
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
+ r.log.Debug(logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
return false
case err == nil:
@@ -138,17 +134,17 @@ func (r *request) initEpoch(ctx context.Context) bool {
}
}
-func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) {
+func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, bool) {
obj := addr.Object()
- t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch)
+ t, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch)
switch {
default:
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
+ r.log.Debug(logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
return nil, false
case err == nil:
@@ -156,13 +152,13 @@ func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*pla
}
}
-func (r *request) getRemoteStorage(ctx context.Context, info clientcore.NodeInfo) (remoteStorage, bool) {
+func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, bool) {
rs, err := r.remoteStorageConstructor.Get(info)
if err != nil {
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotConstructRemoteNodeClient)
+ r.log.Debug(logs.GetCouldNotConstructRemoteNodeClient)
return nil, false
}
@@ -185,7 +181,7 @@ func (r *request) writeCollectedHeader(ctx context.Context) bool {
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotWriteHeader, zap.Error(err))
+ r.log.Debug(logs.GetCouldNotWriteHeader, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
@@ -206,7 +202,7 @@ func (r *request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object)
r.status = statusUndefined
r.err = err
- r.log.Debug(ctx, logs.GetCouldNotWritePayloadChunk, zap.Error(err))
+ r.log.Debug(logs.GetCouldNotWritePayloadChunk, zap.Error(err))
case err == nil:
r.status = statusOK
r.err = nil
diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go
index a103f5a7f..bdf01a977 100644
--- a/pkg/services/object/get/service.go
+++ b/pkg/services/object/get/service.go
@@ -1,7 +1,6 @@
package getsvc
import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -17,7 +16,6 @@ type Service struct {
epochSource epochSource
keyStore keyStorage
remoteStorageConstructor remoteStorageConstructor
- containerSource container.Source
}
// New creates, initializes and returns utility serving
@@ -28,13 +26,12 @@ func New(
e localStorageEngine,
tg traverserGenerator,
cc clientConstructor,
- cs container.Source,
opts ...Option,
) *Service {
result := &Service{
keyStore: ks,
epochSource: es,
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
localStorage: &engineLocalStorage{
engine: e,
},
@@ -42,7 +39,6 @@ func New(
remoteStorageConstructor: &multiclientRemoteStorageConstructor{
clientConstructor: cc,
},
- containerSource: cs,
}
for _, option := range opts {
option(result)
@@ -53,6 +49,6 @@ func New(
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(s *Service) {
- s.log = l
+ s.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))}
}
}
diff --git a/pkg/services/object/get/status.go b/pkg/services/object/get/status.go
index 919338d7f..3a5eebe32 100644
--- a/pkg/services/object/get/status.go
+++ b/pkg/services/object/get/status.go
@@ -6,7 +6,6 @@ const (
statusINHUMED
statusVIRTUAL
statusOutOfRange
- statusEC
)
type statusError struct {
diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go
index 664366d1b..a866132cc 100644
--- a/pkg/services/object/get/types.go
+++ b/pkg/services/object/get/types.go
@@ -6,7 +6,6 @@ import (
"errors"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -20,11 +19,11 @@ import (
)
type epochSource interface {
- Epoch(ctx context.Context) (uint64, error)
+ Epoch() (uint64, error)
}
type traverserGenerator interface {
- GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+ GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, error)
}
type keyStorage interface {
@@ -237,51 +236,3 @@ type RangeHashRes struct {
func (r *RangeHashRes) Hashes() [][]byte {
return r.hashes
}
-
-type ecInfo struct {
- localChunks []objectSDK.ECChunk
- remoteChunks map[string][]objectSDK.ECChunk // node pk -> chunk slice
-}
-
-func newECInfo() *ecInfo {
- return &ecInfo{
- localChunks: make([]objectSDK.ECChunk, 0),
- remoteChunks: make(map[string][]objectSDK.ECChunk),
- }
-}
-
-func (e *ecInfo) addLocal(ecInfo *objectSDK.ECInfo) *objectSDK.ECInfoError {
- for _, ch := range ecInfo.Chunks {
- e.localChunks = append(e.localChunks, objectSDK.ECChunk(ch))
- }
- return e.createECInfoErr()
-}
-
-func (e *ecInfo) addRemote(nodePK string, ecInfo *objectSDK.ECInfo) *objectSDK.ECInfoError {
- for _, ch := range ecInfo.Chunks {
- e.remoteChunks[nodePK] = append(e.remoteChunks[nodePK], objectSDK.ECChunk(ch))
- }
- return e.createECInfoErr()
-}
-
-func (e *ecInfo) createECInfoErr() *objectSDK.ECInfoError {
- unique := make(map[string]struct{})
- result := objectSDK.NewECInfo()
- for _, ch := range e.localChunks {
- if _, found := unique[string(ch.ID.GetValue())]; found {
- continue
- }
- result.AddChunk(ch)
- unique[string(ch.ID.GetValue())] = struct{}{}
- }
- for _, chunks := range e.remoteChunks {
- for _, ch := range chunks {
- if _, found := unique[string(ch.ID.GetValue())]; found {
- continue
- }
- result.AddChunk(ch)
- unique[string(ch.ID.GetValue())] = struct{}{}
- }
- }
- return objectSDK.NewECInfoError(result)
-}
diff --git a/pkg/services/object/get/v2/errors.go b/pkg/services/object/get/v2/errors.go
index aaa09b891..01b57f1f2 100644
--- a/pkg/services/object/get/v2/errors.go
+++ b/pkg/services/object/get/v2/errors.go
@@ -4,8 +4,8 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- refs "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
)
var (
@@ -63,6 +63,10 @@ func errCouldNotWriteObjChunk(forwarder string, err error) error {
return fmt.Errorf("could not write object chunk in %s forwarder: %w", forwarder, err)
}
+func errCouldNotVerifyRangeResponse(resp *objectV2.GetRangeResponse, err error) error {
+ return fmt.Errorf("could not verify %T: %w", resp, err)
+}
+
func errCouldNotCreateGetRangeStream(err error) error {
return fmt.Errorf("could not create Get payload range stream: %w", err)
}
diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go
index 60fcd7fbf..40aa3f62e 100644
--- a/pkg/services/object/get/v2/get_forwarder.go
+++ b/pkg/services/object/get/v2/get_forwarder.go
@@ -7,30 +7,28 @@ import (
"io"
"sync"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
type getRequestForwarder struct {
- OnceResign sync.Once
- GlobalProgress int
- Key *ecdsa.PrivateKey
- Request *objectV2.GetRequest
- Stream *streamObjectWriter
-
- headerSent bool
- headerSentGuard sync.Mutex
+ OnceResign sync.Once
+ OnceHeaderSending sync.Once
+ GlobalProgress int
+ Key *ecdsa.PrivateKey
+ Request *objectV2.GetRequest
+ Stream *streamObjectWriter
}
func (f *getRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*objectSDK.Object, error) {
@@ -85,15 +83,13 @@ func (f *getRequestForwarder) writeHeader(ctx context.Context, v *objectV2.GetOb
obj.SetSignature(v.GetSignature())
obj.SetHeader(v.GetHeader())
- f.headerSentGuard.Lock()
- defer f.headerSentGuard.Unlock()
- if f.headerSent {
- return nil
- }
- if err := f.Stream.WriteHeader(ctx, objectSDK.NewFromV2(obj)); err != nil {
+ var err error
+ f.OnceHeaderSending.Do(func() {
+ err = f.Stream.WriteHeader(ctx, objectSDK.NewFromV2(obj))
+ })
+ if err != nil {
return errCouldNotWriteObjHeader(err)
}
- f.headerSent = true
return nil
}
@@ -170,9 +166,6 @@ func (f *getRequestForwarder) readStream(ctx context.Context, c client.MultiAddr
case *objectV2.SplitInfo:
si := objectSDK.NewSplitInfoFromV2(v)
return objectSDK.NewSplitInfoError(si)
- case *objectV2.ECInfo:
- ei := objectSDK.NewECInfoFromV2(v)
- return objectSDK.NewECInfoError(ei)
}
}
return nil
diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go
index a44616fc9..8a56c59a6 100644
--- a/pkg/services/object/get/v2/get_range_forwarder.go
+++ b/pkg/services/object/get/v2/get_range_forwarder.go
@@ -7,15 +7,16 @@ import (
"io"
"sync"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -63,6 +64,20 @@ func (f *getRangeRequestForwarder) forwardRequestToNode(ctx context.Context, add
return nil, f.readStream(ctx, rangeStream, c, pubkey)
}
+func (f *getRangeRequestForwarder) verifyResponse(resp *objectV2.GetRangeResponse, pubkey []byte) error {
+ // verify response key
+ if err := internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
+ return err
+ }
+
+ // verify response structure
+ if err := signature.VerifyServiceMessage(resp); err != nil {
+ return errCouldNotVerifyRangeResponse(resp, err)
+ }
+
+ return checkStatus(resp.GetMetaHeader().GetStatus())
+}
+
func (f *getRangeRequestForwarder) openStream(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*rpc.ObjectRangeResponseReader, error) {
// open stream
var rangeStream *rpc.ObjectRangeResponseReader
@@ -92,7 +107,7 @@ func (f *getRangeRequestForwarder) readStream(ctx context.Context, rangeStream *
return errReadingResponseFailed(err)
}
- if err := verifyResponse(resp, pubkey); err != nil {
+ if err := f.verifyResponse(resp, pubkey); err != nil {
return err
}
@@ -117,9 +132,6 @@ func (f *getRangeRequestForwarder) readStream(ctx context.Context, rangeStream *
case *objectV2.SplitInfo:
si := objectSDK.NewSplitInfoFromV2(v)
return objectSDK.NewSplitInfoError(si)
- case *objectV2.ECInfo:
- ei := objectSDK.NewECInfoFromV2(v)
- return objectSDK.NewECInfoError(ei)
}
}
return nil
diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go
deleted file mode 100644
index 308ccd512..000000000
--- a/pkg/services/object/get/v2/get_range_hash.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package getsvc
-
-import (
- "context"
- "encoding/hex"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-// GetRangeHash calls internal service and returns v2 response.
-func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- forward, err := s.needToForwardGetRangeHashRequest(ctx, req)
- if err != nil {
- return nil, err
- }
- if forward.needToForward {
- return s.forwardGetRangeHashRequest(ctx, req, forward)
- }
- p, err := s.toHashRangePrm(req)
- if err != nil {
- return nil, err
- }
-
- res, err := s.svc.GetRangeHash(ctx, *p)
- if err != nil {
- return nil, err
- }
-
- return toHashResponse(req.GetBody().GetType(), res), nil
-}
-
-type getRangeForwardParams struct {
- needToForward bool
- containerNodes []netmapSDK.NodeInfo
- address oid.Address
-}
-
-func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) {
- if req.GetMetaHeader().GetTTL() <= 1 {
- return getRangeForwardParams{}, nil
- }
-
- var result getRangeForwardParams
- addrV2 := req.GetBody().GetAddress()
- if addrV2 == nil {
- return result, errMissingObjAddress
- }
-
- var addr oid.Address
- err := addr.ReadFromV2(*addrV2)
- if err != nil {
- return result, errInvalidObjAddress(err)
- }
- result.address = addr
-
- cont, err := s.contSource.Get(ctx, addr.Container())
- if err != nil {
- return result, fmt.Errorf("(%T) could not get container: %w", s, err)
- }
-
- epoch, err := s.netmapSource.Epoch(ctx)
- if err != nil {
- return result, fmt.Errorf("(%T) could not get epoch: %w", s, err)
- }
-
- nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch)
- if err != nil {
- return result, fmt.Errorf("(%T) could not get netmap: %w", s, err)
- }
-
- builder := placement.NewNetworkMapBuilder(nm)
-
- objectID := addr.Object()
- nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy())
- if err != nil {
- return result, fmt.Errorf("(%T) could not build object placement: %w", s, err)
- }
- result.containerNodes = distinctBy(placement.FlattenNodes(nodesVector), func(n netmapSDK.NodeInfo) string { return hex.EncodeToString(n.PublicKey()) })
-
- for _, node := range result.containerNodes {
- if s.announcedKeys.IsLocalKey(node.PublicKey()) {
- return result, nil
- }
- }
- result.needToForward = true
- return result, nil
-}
-
-func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest, params getRangeForwardParams) (*objectV2.GetRangeHashResponse, error) {
- key, err := s.keyStorage.GetKey(nil)
- if err != nil {
- return nil, err
- }
-
- metaHdr := new(session.RequestMetaHeader)
- metaHdr.SetTTL(req.GetMetaHeader().GetTTL() - 1)
- metaHdr.SetOrigin(req.GetMetaHeader())
- writeCurrentVersion(metaHdr)
- req.SetMetaHeader(metaHdr)
-
- if err := signature.SignServiceMessage(key, req); err != nil {
- return nil, err
- }
-
- var firstErr error
- for _, node := range params.containerNodes {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
-
- var addrGr network.AddressGroup
- if err := addrGr.FromIterator(network.NodeEndpointsIterator(node)); err != nil {
- s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
- continue
- }
-
- var extAddr network.AddressGroup
- if len(node.ExternalAddresses()) > 0 {
- if err := extAddr.FromStringSlice(node.ExternalAddresses()); err != nil {
- s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
- continue
- }
- }
-
- var info clientCore.NodeInfo
- clientCore.NodeInfoFromNetmapElement(&info, placement.NewNode(addrGr, extAddr, node.PublicKey()))
-
- resp, err := s.performGetRangeHashOnNode(ctx, req, info)
- if err == nil {
- if err := verifyResponse(resp, info.PublicKey()); err != nil {
- return nil, err
- }
- return resp, nil
- }
- if firstErr == nil {
- firstErr = err
- }
- s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromNode,
- zap.String("node_public_key", hex.EncodeToString(node.PublicKey())),
- zap.Stringer("address", params.address),
- zap.Error(err))
- }
- s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
- if firstErr != nil {
- return nil, firstErr
- }
- return nil, new(apistatus.ObjectNotFound)
-}
-
-func (s *Service) performGetRangeHashOnNode(ctx context.Context, req *objectV2.GetRangeHashRequest, info clientCore.NodeInfo) (*objectV2.GetRangeHashResponse, error) {
- cl, err := s.clientSource.Get(info)
- if err != nil {
- return nil, err
- }
-
- var firstErr error
- var resp *objectV2.GetRangeHashResponse
- info.AddressGroup().IterateAddresses(func(a network.Address) bool {
- resp, err = s.performGetRangeHashOnAddress(ctx, req, cl, a)
- if err != nil {
- if firstErr == nil {
- firstErr = err
- }
- return false
- }
- return true
- })
- if firstErr != nil {
- return nil, firstErr
- }
- if resp == nil {
- return nil, new(apistatus.ObjectNotFound)
- }
- return resp, nil
-}
-
-func (s *Service) performGetRangeHashOnAddress(ctx context.Context, req *objectV2.GetRangeHashRequest, cl clientCore.MultiAddressClient,
- a network.Address,
-) (*objectV2.GetRangeHashResponse, error) {
- var resp *objectV2.GetRangeHashResponse
- var rpcErr error
- err := cl.RawForAddress(ctx, a, func(cli *rpcclient.Client) error {
- resp, rpcErr = rpc.HashObjectRange(cli, req, rpcclient.WithContext(ctx))
- return rpcErr
- })
- if err != nil {
- return nil, err
- }
- return resp, err
-}
-
-func distinctBy[T any, K comparable](source []T, keySelector func(v T) K) []T {
- var result []T
- dict := make(map[K]struct{})
- for _, v := range source {
- key := keySelector(v)
- if _, exists := dict[key]; !exists {
- result = append(result, v)
- dict[key] = struct{}{}
- }
- }
- return result
-}
diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go
index 56056398d..a1bce1517 100644
--- a/pkg/services/object/get/v2/head_forwarder.go
+++ b/pkg/services/object/get/v2/head_forwarder.go
@@ -5,15 +5,16 @@ import (
"crypto/ecdsa"
"sync"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -23,6 +24,7 @@ import (
type headRequestForwarder struct {
Request *objectV2.HeadRequest
+ Response *objectV2.HeadResponse
OnceResign sync.Once
ObjectAddr oid.Address
Key *ecdsa.PrivateKey
@@ -59,7 +61,7 @@ func (f *headRequestForwarder) forwardRequestToNode(ctx context.Context, addr ne
return nil, err
}
- if err := verifyResponse(headResp, pubkey); err != nil {
+ if err := f.verifyResponse(headResp, pubkey); err != nil {
return nil, err
}
@@ -82,9 +84,6 @@ func (f *headRequestForwarder) forwardRequestToNode(ctx context.Context, addr ne
case *objectV2.SplitInfo:
si := objectSDK.NewSplitInfoFromV2(v)
return nil, objectSDK.NewSplitInfoError(si)
- case *objectV2.ECInfo:
- ei := objectSDK.NewECInfoFromV2(v)
- return nil, objectSDK.NewECInfoError(ei)
}
objv2 := new(objectV2.Object)
@@ -158,3 +157,17 @@ func (f *headRequestForwarder) sendHeadRequest(ctx context.Context, addr network
}
return headResp, nil
}
+
+func (f *headRequestForwarder) verifyResponse(headResp *objectV2.HeadResponse, pubkey []byte) error {
+ // verify response key
+ if err := internal.VerifyResponseKeyV2(pubkey, headResp); err != nil {
+ return err
+ }
+
+ // verify response structure
+ if err := signature.VerifyServiceMessage(headResp); err != nil {
+ return errResponseVerificationFailed(err)
+ }
+
+ return checkStatus(headResp.GetMetaHeader().GetStatus())
+}
diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go
index 0ec8912fd..d4bce178a 100644
--- a/pkg/services/object/get/v2/service.go
+++ b/pkg/services/object/get/v2/service.go
@@ -4,16 +4,11 @@ import (
"context"
"errors"
- clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "go.uber.org/zap"
)
// Service implements Get operation of Object service v2.
@@ -24,44 +19,15 @@ type Service struct {
// Option represents Service constructor option.
type Option func(*cfg)
-type clientSource interface {
- Get(info clientcore.NodeInfo) (clientcore.MultiAddressClient, error)
-}
-
type cfg struct {
svc *getsvc.Service
keyStorage *objutil.KeyStorage
-
- clientSource clientSource
-
- netmapSource netmap.Source
-
- announcedKeys netmap.AnnouncedKeys
-
- contSource container.Source
-
- log *logger.Logger
}
// NewService constructs Service instance from provided options.
-func NewService(svc *getsvc.Service,
- keyStorage *objutil.KeyStorage,
- clientSource clientSource,
- netmapSource netmap.Source,
- announcedKeys netmap.AnnouncedKeys,
- contSource container.Source,
- opts ...Option,
-) *Service {
- c := &cfg{
- svc: svc,
- keyStorage: keyStorage,
- clientSource: clientSource,
- netmapSource: netmapSource,
- announcedKeys: announcedKeys,
- contSource: contSource,
- log: logger.NewLoggerWrapper(zap.L()),
- }
+func NewService(opts ...Option) *Service {
+ c := new(cfg)
for i := range opts {
opts[i](c)
@@ -82,13 +48,10 @@ func (s *Service) Get(req *objectV2.GetRequest, stream objectSvc.GetObjectStream
err = s.svc.Get(stream.Context(), *p)
var splitErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
switch {
case errors.As(err, &splitErr):
return stream.Send(splitInfoResponse(splitErr.SplitInfo()))
- case errors.As(err, &ecErr):
- return stream.Send(ecInfoResponse(ecErr.ECInfo()))
default:
return err
}
@@ -104,18 +67,30 @@ func (s *Service) GetRange(req *objectV2.GetRangeRequest, stream objectSvc.GetOb
err = s.svc.GetRange(stream.Context(), *p)
var splitErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
switch {
case errors.As(err, &splitErr):
return stream.Send(splitInfoRangeResponse(splitErr.SplitInfo()))
- case errors.As(err, &ecErr):
- return stream.Send(ecInfoRangeResponse(ecErr.ECInfo()))
default:
return err
}
}
+// GetRangeHash calls internal service and returns v2 response.
+func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
+ p, err := s.toHashRangePrm(req)
+ if err != nil {
+ return nil, err
+ }
+
+ res, err := s.svc.GetRangeHash(ctx, *p)
+ if err != nil {
+ return nil, err
+ }
+
+ return toHashResponse(req.GetBody().GetType(), res), nil
+}
+
// Head serves ForstFS API v2 compatible HEAD requests.
func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
resp := new(objectV2.HeadResponse)
@@ -129,22 +104,24 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV
err = s.svc.Head(ctx, *p)
var splitErr *objectSDK.SplitInfoError
- var ecErr *objectSDK.ECInfoError
if errors.As(err, &splitErr) {
setSplitInfoHeadResponse(splitErr.SplitInfo(), resp)
err = nil
}
- if errors.As(err, &ecErr) {
- setECInfoHeadResponse(ecErr.ECInfo(), resp)
- err = nil
- }
return resp, err
}
-func WithLogger(l *logger.Logger) Option {
+func WithInternalService(v *getsvc.Service) Option {
return func(c *cfg) {
- c.log = l
+ c.svc = v
+ }
+}
+
+// WithKeyStorage returns option to set local private key storage.
+func WithKeyStorage(ks *objutil.KeyStorage) Option {
+ return func(c *cfg) {
+ c.keyStorage = ks
}
}
diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go
index 0d73bcd4d..ce9a5c767 100644
--- a/pkg/services/object/get/v2/streamer.go
+++ b/pkg/services/object/get/v2/streamer.go
@@ -3,8 +3,8 @@ package getsvc
import (
"context"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
@@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec
p.SetHeader(objV2.GetHeader())
p.SetSignature(objV2.GetSignature())
- return s.Send(newResponse(p))
+ return s.GetObjectStream.Send(newResponse(p))
}
func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error {
p := new(objectV2.GetObjectPartChunk)
p.SetChunk(chunk)
- return s.Send(newResponse(p))
+ return s.GetObjectStream.Send(newResponse(p))
}
func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
@@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
}
func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error {
- return s.Send(newRangeResponse(chunk))
+ return s.GetObjectRangeStream.Send(newRangeResponse(chunk))
}
func newRangeResponse(p []byte) *objectV2.GetRangeResponse {
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index 4b7dcc530..7f7dd7480 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -5,17 +5,15 @@ import (
"crypto/sha256"
"hash"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/status"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/status"
clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -182,7 +180,9 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran
default:
return nil, errUnknownChechsumType(t)
case refs.SHA256:
- p.SetHashGenerator(sha256.New)
+ p.SetHashGenerator(func() hash.Hash {
+ return sha256.New()
+ })
case refs.TillichZemor:
p.SetHashGenerator(func() hash.Hash {
return tz.New()
@@ -249,6 +249,7 @@ func (s *Service) toHeadPrm(req *objectV2.HeadRequest, resp *objectV2.HeadRespon
forwarder := &headRequestForwarder{
Request: req,
+ Response: resp,
ObjectAddr: objAddr,
Key: key,
}
@@ -269,17 +270,6 @@ func splitInfoResponse(info *objectSDK.SplitInfo) *objectV2.GetResponse {
return resp
}
-func ecInfoResponse(info *objectSDK.ECInfo) *objectV2.GetResponse {
- resp := new(objectV2.GetResponse)
-
- body := new(objectV2.GetResponseBody)
- resp.SetBody(body)
-
- body.SetObjectPart(info.ToV2())
-
- return resp
-}
-
func splitInfoRangeResponse(info *objectSDK.SplitInfo) *objectV2.GetRangeResponse {
resp := new(objectV2.GetRangeResponse)
@@ -291,25 +281,10 @@ func splitInfoRangeResponse(info *objectSDK.SplitInfo) *objectV2.GetRangeRespons
return resp
}
-func ecInfoRangeResponse(info *objectSDK.ECInfo) *objectV2.GetRangeResponse {
- resp := new(objectV2.GetRangeResponse)
-
- body := new(objectV2.GetRangeResponseBody)
- resp.SetBody(body)
-
- body.SetRangePart(info.ToV2())
-
- return resp
-}
-
func setSplitInfoHeadResponse(info *objectSDK.SplitInfo, resp *objectV2.HeadResponse) {
resp.GetBody().SetHeaderPart(info.ToV2())
}
-func setECInfoHeadResponse(info *objectSDK.ECInfo, resp *objectV2.HeadResponse) {
- resp.GetBody().SetHeaderPart(info.ToV2())
-}
-
func toHashResponse(typ refs.ChecksumType, res *getsvc.RangeHashRes) *objectV2.GetRangeHashResponse {
resp := new(objectV2.GetRangeHashResponse)
@@ -408,20 +383,3 @@ func chunkToSend(global, local int, chunk []byte) []byte {
return chunk[global-local:]
}
-
-type apiResponse interface {
- GetMetaHeader() *session.ResponseMetaHeader
- GetVerificationHeader() *session.ResponseVerificationHeader
-}
-
-func verifyResponse(resp apiResponse, pubkey []byte) error {
- if err := internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
- return err
- }
-
- if err := signature.VerifyServiceMessage(resp); err != nil {
- return errResponseVerificationFailed(err)
- }
-
- return checkStatus(resp.GetMetaHeader().GetStatus())
-}
diff --git a/pkg/services/object/head/prm.go b/pkg/services/object/head/prm.go
new file mode 100644
index 000000000..5566e48fe
--- /dev/null
+++ b/pkg/services/object/head/prm.go
@@ -0,0 +1,17 @@
+package headsvc
+
+import (
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+type Prm struct {
+ addr oid.Address
+}
+
+func (p *Prm) WithAddress(v oid.Address) *Prm {
+ if p != nil {
+ p.addr = v
+ }
+
+ return p
+}
diff --git a/pkg/services/object/remote_reader.go b/pkg/services/object/head/remote.go
similarity index 50%
rename from pkg/services/object/remote_reader.go
rename to pkg/services/object/head/remote.go
index bc6ffd160..c9c17d4d8 100644
--- a/pkg/services/object/remote_reader.go
+++ b/pkg/services/object/head/remote.go
@@ -1,7 +1,8 @@
-package object
+package headsvc
import (
"context"
+ "errors"
"fmt"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -17,33 +18,35 @@ type ClientConstructor interface {
Get(clientcore.NodeInfo) (clientcore.MultiAddressClient, error)
}
-// RemoteReader represents utility for getting
-// the object from a remote host.
-type RemoteReader struct {
+// RemoteHeader represents utility for getting
+// the object header from a remote host.
+type RemoteHeader struct {
keyStorage *util.KeyStorage
clientCache ClientConstructor
}
-// RemoteRequestPrm groups remote operation parameters.
-type RemoteRequestPrm struct {
- addr oid.Address
- raw bool
+// RemoteHeadPrm groups remote header operation parameters.
+type RemoteHeadPrm struct {
+ commonHeadPrm *Prm
+
node netmap.NodeInfo
}
const remoteOpTTL = 1
-// NewRemoteReader creates, initializes and returns new RemoteHeader instance.
-func NewRemoteReader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteReader {
- return &RemoteReader{
+var ErrNotFound = errors.New("object header not found")
+
+// NewRemoteHeader creates, initializes and returns new RemoteHeader instance.
+func NewRemoteHeader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteHeader {
+ return &RemoteHeader{
keyStorage: keyStorage,
clientCache: cache,
}
}
// WithNodeInfo sets information about the remote node.
-func (p *RemoteRequestPrm) WithNodeInfo(v netmap.NodeInfo) *RemoteRequestPrm {
+func (p *RemoteHeadPrm) WithNodeInfo(v netmap.NodeInfo) *RemoteHeadPrm {
if p != nil {
p.node = v
}
@@ -52,23 +55,16 @@ func (p *RemoteRequestPrm) WithNodeInfo(v netmap.NodeInfo) *RemoteRequestPrm {
}
// WithObjectAddress sets object address.
-func (p *RemoteRequestPrm) WithObjectAddress(v oid.Address) *RemoteRequestPrm {
+func (p *RemoteHeadPrm) WithObjectAddress(v oid.Address) *RemoteHeadPrm {
if p != nil {
- p.addr = v
+ p.commonHeadPrm = new(Prm).WithAddress(v)
}
return p
}
-func (p *RemoteRequestPrm) WithRaw(v bool) *RemoteRequestPrm {
- if p != nil {
- p.raw = v
- }
- return p
-}
-
// Head requests object header from the remote node.
-func (h *RemoteReader) Head(ctx context.Context, prm *RemoteRequestPrm) (*objectSDK.Object, error) {
+func (h *RemoteHeader) Head(ctx context.Context, prm *RemoteHeadPrm) (*objectSDK.Object, error) {
key, err := h.keyStorage.GetKey(nil)
if err != nil {
return nil, fmt.Errorf("(%T) could not receive private key: %w", h, err)
@@ -90,11 +86,8 @@ func (h *RemoteReader) Head(ctx context.Context, prm *RemoteRequestPrm) (*object
headPrm.SetClient(c)
headPrm.SetPrivateKey(key)
- headPrm.SetAddress(prm.addr)
+ headPrm.SetAddress(prm.commonHeadPrm.addr)
headPrm.SetTTL(remoteOpTTL)
- if prm.raw {
- headPrm.SetRawFlag()
- }
res, err := internalclient.HeadObject(ctx, headPrm)
if err != nil {
@@ -103,39 +96,3 @@ func (h *RemoteReader) Head(ctx context.Context, prm *RemoteRequestPrm) (*object
return res.Header(), nil
}
-
-func (h *RemoteReader) Get(ctx context.Context, prm *RemoteRequestPrm) (*objectSDK.Object, error) {
- key, err := h.keyStorage.GetKey(nil)
- if err != nil {
- return nil, fmt.Errorf("(%T) could not receive private key: %w", h, err)
- }
-
- var info clientcore.NodeInfo
-
- err = clientcore.NodeInfoFromRawNetmapElement(&info, netmapCore.Node(prm.node))
- if err != nil {
- return nil, fmt.Errorf("parse client node info: %w", err)
- }
-
- c, err := h.clientCache.Get(info)
- if err != nil {
- return nil, fmt.Errorf("(%T) could not create SDK client %s: %w", h, info.AddressGroup(), err)
- }
-
- var getPrm internalclient.GetObjectPrm
-
- getPrm.SetClient(c)
- getPrm.SetPrivateKey(key)
- getPrm.SetAddress(prm.addr)
- getPrm.SetTTL(remoteOpTTL)
- if prm.raw {
- getPrm.SetRawFlag()
- }
-
- res, err := internalclient.GetObject(ctx, getPrm)
- if err != nil {
- return nil, fmt.Errorf("(%T) could not head object in %s: %w", h, info.AddressGroup(), err)
- }
-
- return res.Object(), nil
-}
diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go
index 3e8832640..0301a593a 100644
--- a/pkg/services/object/internal/client/client.go
+++ b/pkg/services/object/internal/client/client.go
@@ -7,11 +7,9 @@ import (
"errors"
"fmt"
"io"
- "strconv"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- sessionAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -33,8 +31,6 @@ type commonPrm struct {
local bool
xHeaders []string
-
- netmapEpoch uint64
}
// SetClient sets base client for ForstFS API communication.
@@ -77,14 +73,6 @@ func (x *commonPrm) SetXHeaders(hs []string) {
x.xHeaders = hs
}
-func (x *commonPrm) calculateXHeaders() []string {
- hs := x.xHeaders
- if x.netmapEpoch != 0 {
- hs = append(hs, sessionAPI.XHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10))
- }
- return hs
-}
-
type readPrmCommon struct {
commonPrm
}
@@ -92,8 +80,8 @@ type readPrmCommon struct {
// SetNetmapEpoch sets the epoch number to be used to locate the objectSDK.
//
// By default current epoch on the server will be used.
-func (x *readPrmCommon) SetNetmapEpoch(epoch uint64) {
- x.netmapEpoch = epoch
+func (x *readPrmCommon) SetNetmapEpoch(_ uint64) {
+ // FIXME(@fyrchik): https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/465
}
// GetObjectPrm groups parameters of GetObject operation.
@@ -151,7 +139,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
prm.ClientParams.Session = prm.tokenSession
}
- prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.XHeaders = prm.xHeaders
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Key = prm.key
@@ -245,7 +233,7 @@ func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error)
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
- prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.XHeaders = prm.xHeaders
cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams)
if err == nil {
@@ -338,7 +326,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e
prm.ClientParams.Session = prm.tokenSession
}
- prm.ClientParams.XHeaders = prm.calculateXHeaders()
+ prm.ClientParams.XHeaders = prm.xHeaders
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Length = prm.ln
@@ -355,9 +343,12 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e
return nil, new(apistatus.ObjectOutOfRange)
}
- ln := min(prm.ln, maxInitialBufferSize)
+ ln := prm.ln
+ if ln > maxInitialBufferSize {
+ ln = maxInitialBufferSize
+ }
- w := bytes.NewBuffer(make([]byte, 0, ln))
+ w := bytes.NewBuffer(make([]byte, ln))
_, err = io.CopyN(w, rdr, int64(prm.ln))
if err != nil {
return nil, fmt.Errorf("read payload: %w", err)
@@ -401,14 +392,24 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "client.PutObject")
defer span.End()
- prmCli := client.PrmObjectPutInit{
- XHeaders: prm.calculateXHeaders(),
- BearerToken: prm.tokenBearer,
- Session: prm.tokenSession,
- Local: true,
- Key: prm.key,
+ var prmCli client.PrmObjectPutInit
+
+ prmCli.MarkLocal()
+
+ if prm.key != nil {
+ prmCli.UseKey(*prm.key)
}
+ if prm.tokenSession != nil {
+ prmCli.WithinSession(*prm.tokenSession)
+ }
+
+ if prm.tokenBearer != nil {
+ prmCli.WithBearerToken(*prm.tokenBearer)
+ }
+
+ prmCli.WithXHeaders(prm.xHeaders...)
+
w, err := prm.cli.ObjectPutInit(ctx, prmCli)
if err != nil {
return nil, fmt.Errorf("init object writing on client: %w", err)
@@ -448,15 +449,25 @@ func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, erro
return nil, errors.New("missing object id")
}
- prmCli := client.PrmObjectPutSingle{
- XHeaders: prm.calculateXHeaders(),
- BearerToken: prm.tokenBearer,
- Session: prm.tokenSession,
- Local: true,
- Key: prm.key,
- Object: prm.obj,
+ var prmCli client.PrmObjectPutSingle
+
+ prmCli.ExecuteLocal()
+
+ if prm.key != nil {
+ prmCli.UseKey(prm.key)
}
+ if prm.tokenSession != nil {
+ prmCli.WithinSession(*prm.tokenSession)
+ }
+
+ if prm.tokenBearer != nil {
+ prmCli.WithBearerToken(*prm.tokenBearer)
+ }
+
+ prmCli.WithXHeaders(prm.xHeaders...)
+ prmCli.SetObject(prm.obj.ToV2())
+
res, err := prm.cli.ObjectPutSingle(ctx, prmCli)
if err != nil {
ReportError(prm.cli, err)
@@ -483,12 +494,12 @@ type SearchObjectsPrm struct {
//
// Required parameter.
func (x *SearchObjectsPrm) SetContainerID(id cid.ID) {
- x.cliPrm.ContainerID = &id
+ x.cliPrm.InContainer(id)
}
// SetFilters sets search filters.
func (x *SearchObjectsPrm) SetFilters(fs objectSDK.SearchFilters) {
- x.cliPrm.Filters = fs
+ x.cliPrm.SetFilters(fs)
}
// SearchObjectsRes groups the resulting values of SearchObjects operation.
@@ -505,11 +516,23 @@ func (x SearchObjectsRes) IDList() []oid.ID {
//
// Returns any error which prevented the operation from completing correctly in error return.
func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes, error) {
- prm.cliPrm.Local = prm.local
- prm.cliPrm.Session = prm.tokenSession
- prm.cliPrm.BearerToken = prm.tokenBearer
- prm.cliPrm.XHeaders = prm.calculateXHeaders()
- prm.cliPrm.Key = prm.key
+ if prm.local {
+ prm.cliPrm.MarkLocal()
+ }
+
+ if prm.tokenSession != nil {
+ prm.cliPrm.WithinSession(*prm.tokenSession)
+ }
+
+ if prm.tokenBearer != nil {
+ prm.cliPrm.WithBearerToken(*prm.tokenBearer)
+ }
+
+ prm.cliPrm.WithXHeaders(prm.xHeaders...)
+
+ if prm.key != nil {
+ prm.cliPrm.UseKey(*prm.key)
+ }
rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm)
if err != nil {
diff --git a/pkg/services/object/internal/key.go b/pkg/services/object/internal/key.go
index 1e0a7ef90..7ab5f082c 100644
--- a/pkg/services/object/internal/key.go
+++ b/pkg/services/object/internal/key.go
@@ -3,15 +3,14 @@ package internal
import (
"bytes"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
// VerifyResponseKeyV2 checks if response is signed with expected key. Returns client.ErrWrongPublicKey if not.
func VerifyResponseKeyV2(expectedKey []byte, resp interface {
GetVerificationHeader() *session.ResponseVerificationHeader
-},
-) error {
+}) error {
if !bytes.Equal(resp.GetVerificationHeader().GetBodySignature().GetKey(), expectedKey) {
return client.ErrWrongPublicKey
}
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index 6a6ee0f0f..f972f43ae 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -4,9 +4,8 @@ import (
"context"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type (
@@ -28,14 +27,8 @@ type (
start time.Time
}
- patchStreamMetric struct {
- stream PatchObjectStream
- metrics MetricRegister
- start time.Time
- }
-
MetricRegister interface {
- AddRequestDuration(string, time.Duration, bool, string)
+ AddRequestDuration(string, time.Duration, bool)
AddPayloadSize(string, int)
}
)
@@ -52,7 +45,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
if m.enabled {
t := time.Now()
defer func() {
- m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
+ m.metrics.AddRequestDuration("Get", time.Since(t), err == nil)
}()
err = m.next.Get(req, &getStreamMetric{
ServerStream: stream,
@@ -65,11 +58,11 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
return
}
-func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
+func (m MetricCollector) Put() (PutObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Put(ctx)
+ stream, err := m.next.Put()
if err != nil {
return nil, err
}
@@ -80,25 +73,7 @@ func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
start: t,
}, nil
}
- return m.next.Put(ctx)
-}
-
-func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) {
- if m.enabled {
- t := time.Now()
-
- stream, err := m.next.Patch(ctx)
- if err != nil {
- return nil, err
- }
-
- return &patchStreamMetric{
- stream: stream,
- metrics: m.metrics,
- start: t,
- }, nil
- }
- return m.next.Patch(ctx)
+ return m.next.Put()
}
func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) {
@@ -107,7 +82,7 @@ func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingl
res, err := m.next.PutSingle(ctx, request)
- m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil)
if err == nil {
m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload()))
}
@@ -123,7 +98,7 @@ func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest)
res, err := m.next.Head(ctx, request)
- m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("Head", time.Since(t), err == nil)
return res, err
}
@@ -136,7 +111,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream)
err := m.next.Search(req, stream)
- m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
+ m.metrics.AddRequestDuration("Search", time.Since(t), err == nil)
return err
}
@@ -149,7 +124,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque
res, err := m.next.Delete(ctx, request)
- m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil)
return res, err
}
return m.next.Delete(ctx, request)
@@ -161,7 +136,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR
err := m.next.GetRange(req, stream)
- m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
+ m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil)
return err
}
@@ -174,7 +149,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa
res, err := m.next.GetRangeHash(ctx, request)
- m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
+ m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil)
return res, err
}
@@ -210,21 +185,7 @@ func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error
func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
res, err := s.stream.CloseAndRecv(ctx)
- s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
-
- return res, err
-}
-
-func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) error {
- s.metrics.AddPayloadSize("Patch", len(req.GetBody().GetPatch().GetChunk()))
-
- return s.stream.Send(ctx, req)
-}
-
-func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
- res, err := s.stream.CloseAndRecv(ctx)
-
- s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
+ s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil)
return res, err
}
diff --git a/pkg/services/object/patch/range_provider.go b/pkg/services/object/patch/range_provider.go
deleted file mode 100644
index cb3f7c342..000000000
--- a/pkg/services/object/patch/range_provider.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package patchsvc
-
-import (
- "context"
- "crypto/ecdsa"
- "io"
-
- getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- objectUtil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- patcherSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher"
-)
-
-func (p *pipeChunkWriter) WriteChunk(_ context.Context, chunk []byte) error {
- _, err := p.wr.Write(chunk)
- return err
-}
-
-type rangeProvider struct {
- getSvc *getsvc.Service
-
- addr oid.Address
-
- commonPrm *objectUtil.CommonPrm
-
- localNodeKey *ecdsa.PrivateKey
-}
-
-var _ patcherSDK.RangeProvider = (*rangeProvider)(nil)
-
-func (r *rangeProvider) GetRange(ctx context.Context, rng *objectSDK.Range) io.Reader {
- // Remote GetRange request to a container node uses an SDK-client that fails range validation
- // with zero-length. However, from the patcher's point of view, such request is still valid.
- if rng.GetLength() == 0 {
- return &nopReader{}
- }
-
- pipeReader, pipeWriter := io.Pipe()
-
- var rngPrm getsvc.RangePrm
- rngPrm.SetSignerKey(r.localNodeKey)
- rngPrm.SetCommonParameters(r.commonPrm)
-
- rngPrm.WithAddress(r.addr)
- rngPrm.SetChunkWriter(&pipeChunkWriter{
- wr: pipeWriter,
- })
- rngPrm.SetRange(rng)
-
- getRangeErr := make(chan error)
-
- go func() {
- defer pipeWriter.Close()
-
- select {
- case <-ctx.Done():
- pipeWriter.CloseWithError(ctx.Err())
- case err := <-getRangeErr:
- pipeWriter.CloseWithError(err)
- }
- }()
-
- go func() {
- getRangeErr <- r.getSvc.GetRange(ctx, rngPrm)
- }()
-
- return pipeReader
-}
-
-type nopReader struct{}
-
-func (nopReader) Read(_ []byte) (int, error) {
- return 0, io.EOF
-}
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
deleted file mode 100644
index 5d298bfed..000000000
--- a/pkg/services/object/patch/service.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package patchsvc
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
- getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
-)
-
-// Service implements Put operation of Object service v2.
-type Service struct {
- *objectwriter.Config
-
- getSvc *getsvc.Service
-}
-
-// NewService constructs Service instance from provided options.
-//
-// Patch service can use the same objectwriter.Config initializied by Put service.
-func NewService(cfg *objectwriter.Config,
- getSvc *getsvc.Service,
-) *Service {
- return &Service{
- Config: cfg,
-
- getSvc: getSvc,
- }
-}
-
-// Patch calls internal service and returns v2 object streamer.
-func (s *Service) Patch() (object.PatchObjectStream, error) {
- nodeKey, err := s.KeyStorage.GetKey(nil)
- if err != nil {
- return nil, err
- }
-
- return &Streamer{
- Config: s.Config,
- getSvc: s.getSvc,
- localNodeKey: nodeKey,
- }, nil
-}
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
deleted file mode 100644
index ff13b1d3e..000000000
--- a/pkg/services/object/patch/streamer.go
+++ /dev/null
@@ -1,243 +0,0 @@
-package patchsvc
-
-import (
- "context"
- "crypto/ecdsa"
- "errors"
- "fmt"
- "io"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
- objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
- getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher"
-)
-
-// Streamer for the patch handler is a pipeline that merges two incoming streams of patches
-// and original object payload chunks. The merged result is fed to Put stream target.
-type Streamer struct {
- *objectwriter.Config
-
- // Patcher must be initialized at first Streamer.Send call.
- patcher patcher.PatchApplier
-
- nonFirstSend bool
-
- getSvc *getsvc.Service
-
- localNodeKey *ecdsa.PrivateKey
-}
-
-type pipeChunkWriter struct {
- wr *io.PipeWriter
-}
-
-type headResponseWriter struct {
- body *objectV2.HeadResponseBody
-}
-
-func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *objectSDK.Object) error {
- w.body.SetHeaderPart(toFullObjectHeader(hdr))
- return nil
-}
-
-func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart {
- obj := hdr.ToV2()
-
- hs := new(objectV2.HeaderWithSignature)
- hs.SetHeader(obj.GetHeader())
- hs.SetSignature(obj.GetSignature())
-
- return hs
-}
-
-func isLinkObject(hdr *objectV2.HeaderWithSignature) bool {
- split := hdr.GetHeader().GetSplit()
- return len(split.GetChildren()) > 0 && split.GetParent() != nil
-}
-
-func isComplexObjectPart(hdr *objectV2.HeaderWithSignature) bool {
- return hdr.GetHeader().GetEC() != nil || hdr.GetHeader().GetSplit() != nil
-}
-
-func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
- hdrWithSig, addr, err := s.readHeader(ctx, req)
- if err != nil {
- return err
- }
-
- if hdrWithSig.GetHeader().GetObjectType() != objectV2.TypeRegular {
- return errors.New("non-regular object can't be patched")
- }
- if isLinkObject(hdrWithSig) {
- return errors.New("linking object can't be patched")
- }
- if isComplexObjectPart(hdrWithSig) {
- return errors.New("complex object parts can't be patched")
- }
-
- commonPrm, err := util.CommonPrmFromV2(req)
- if err != nil {
- return err
- }
- commonPrm.WithLocalOnly(false)
-
- rangeProvider := &rangeProvider{
- getSvc: s.getSvc,
-
- addr: addr,
-
- commonPrm: commonPrm,
-
- localNodeKey: s.localNodeKey,
- }
-
- hdr := hdrWithSig.GetHeader()
- oV2 := new(objectV2.Object)
- hV2 := new(objectV2.Header)
- oV2.SetHeader(hV2)
- oV2.GetHeader().SetContainerID(hdr.GetContainerID())
- oV2.GetHeader().SetPayloadLength(hdr.GetPayloadLength())
- oV2.GetHeader().SetAttributes(hdr.GetAttributes())
-
- ownerID, err := newOwnerID(req.GetVerificationHeader())
- if err != nil {
- return err
- }
- oV2.GetHeader().SetOwnerID(ownerID)
-
- target, err := target.New(ctx, objectwriter.Params{
- Config: s.Config,
- Common: commonPrm,
- Header: objectSDK.NewFromV2(oV2),
- })
- if err != nil {
- return fmt.Errorf("target creation: %w", err)
- }
-
- patcherPrm := patcher.Params{
- Header: objectSDK.NewFromV2(oV2),
-
- RangeProvider: rangeProvider,
-
- ObjectWriter: target,
- }
-
- s.patcher = patcher.New(patcherPrm)
- return nil
-}
-
-func (s *Streamer) readHeader(ctx context.Context, req *objectV2.PatchRequest) (hdrWithSig *objectV2.HeaderWithSignature, addr oid.Address, err error) {
- addrV2 := req.GetBody().GetAddress()
- if addrV2 == nil {
- err = errors.New("patch request has nil-address")
- return
- }
-
- if err = addr.ReadFromV2(*addrV2); err != nil {
- err = fmt.Errorf("read address error: %w", err)
- return
- }
-
- commonPrm, err := util.CommonPrmFromV2(req)
- if err != nil {
- return
- }
- commonPrm.WithLocalOnly(false)
-
- var p getsvc.HeadPrm
- p.SetSignerKey(s.localNodeKey)
- p.SetCommonParameters(commonPrm)
-
- resp := new(objectV2.HeadResponse)
- resp.SetBody(new(objectV2.HeadResponseBody))
-
- p.WithAddress(addr)
- p.SetHeaderWriter(&headResponseWriter{
- body: resp.GetBody(),
- })
-
- err = s.getSvc.Head(ctx, p)
- if err != nil {
- err = fmt.Errorf("get header error: %w", err)
- return
- }
-
- var ok bool
- hdrPart := resp.GetBody().GetHeaderPart()
- if hdrWithSig, ok = hdrPart.(*objectV2.HeaderWithSignature); !ok {
- err = fmt.Errorf("unexpected header type: %T", hdrPart)
- }
- return
-}
-
-func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "patch.streamer.Send")
- defer span.End()
-
- defer func() {
- s.nonFirstSend = true
- }()
-
- if !s.nonFirstSend {
- if err := s.init(ctx, req); err != nil {
- return fmt.Errorf("streamer init error: %w", err)
- }
- }
-
- patch := new(objectSDK.Patch)
- patch.FromV2(req.GetBody())
-
- if !s.nonFirstSend {
- err := s.patcher.ApplyHeaderPatch(ctx,
- patcher.ApplyHeaderPatchPrm{
- NewSplitHeader: patch.NewSplitHeader,
- NewAttributes: patch.NewAttributes,
- ReplaceAttributes: patch.ReplaceAttributes,
- })
- if err != nil {
- return fmt.Errorf("patch attributes: %w", err)
- }
- }
-
- if patch.PayloadPatch != nil {
- err := s.patcher.ApplyPayloadPatch(ctx, patch.PayloadPatch)
- if err != nil {
- return fmt.Errorf("patch payload: %w", err)
- }
- } else if s.nonFirstSend {
- return errors.New("invalid non-first patch: empty payload")
- }
-
- return nil
-}
-
-func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
- if s.patcher == nil {
- return nil, errors.New("uninitialized patch streamer")
- }
- patcherResp, err := s.patcher.Close(ctx)
- if err != nil {
- return nil, err
- }
-
- oidV2 := new(refsV2.ObjectID)
-
- if patcherResp.AccessIdentifiers.ParentID != nil {
- patcherResp.AccessIdentifiers.ParentID.WriteToV2(oidV2)
- } else {
- patcherResp.AccessIdentifiers.SelfID.WriteToV2(oidV2)
- }
-
- return &objectV2.PatchResponse{
- Body: &objectV2.PatchResponseBody{
- ObjectID: oidV2,
- },
- }, nil
-}
diff --git a/pkg/services/object/patch/util.go b/pkg/services/object/patch/util.go
deleted file mode 100644
index b9416789c..000000000
--- a/pkg/services/object/patch/util.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package patchsvc
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-)
-
-func newOwnerID(vh *session.RequestVerificationHeader) (*refs.OwnerID, error) {
- for vh.GetOrigin() != nil {
- vh = vh.GetOrigin()
- }
- sig := vh.GetBodySignature()
- if sig == nil {
- return nil, errors.New("empty body signature")
- }
- key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
- if err != nil {
- return nil, fmt.Errorf("invalid signature key: %w", err)
- }
-
- var userID user.ID
- user.IDFromKey(&userID, (ecdsa.PublicKey)(*key))
- ownID := new(refs.OwnerID)
- userID.WriteToV2(ownID)
-
- return ownID, nil
-}
diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/put/common.go
similarity index 61%
rename from pkg/services/object/common/writer/common.go
rename to pkg/services/object/put/common.go
index 6593d3ca0..6696a192b 100644
--- a/pkg/services/object/common/writer/common.go
+++ b/pkg/services/object/put/common.go
@@ -1,4 +1,4 @@
-package writer
+package putsvc
import (
"context"
@@ -13,23 +13,23 @@ import (
"go.uber.org/zap"
)
-type NodeIterator struct {
- Traversal
- cfg *Config
+type nodeIterator struct {
+ traversal
+ cfg *cfg
}
-func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
- return &NodeIterator{
- Traversal: Traversal{
- Opts: opts,
- Exclude: make(map[string]*bool),
+func (c *cfg) newNodeIterator(opts []placement.Option) *nodeIterator {
+ return &nodeIterator{
+ traversal: traversal{
+ opts: opts,
+ mExclude: make(map[string]*bool),
},
cfg: c,
}
}
-func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
- traverser, err := placement.NewTraverser(ctx, n.Opts...)
+func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context, nodeDesc) error) error {
+ traverser, err := placement.NewTraverser(n.traversal.opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
@@ -56,10 +56,10 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context,
}
// perform additional container broadcast if needed
- if n.submitPrimaryPlacementFinish() {
- err := n.ForEachNode(ctx, f)
+ if n.traversal.submitPrimaryPlacementFinish() {
+ err := n.forEachNode(ctx, f)
if err != nil {
- n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
+ n.cfg.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
@@ -67,11 +67,12 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context,
return nil
}
-func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, NodeDescriptor) error, resErr *atomic.Value) bool {
+func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, nodeDesc) error, resErr *atomic.Value) bool {
var wg sync.WaitGroup
for _, addr := range addrs {
- if ok := n.Exclude[string(addr.PublicKey())]; ok != nil {
+ addr := addr
+ if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
if *ok {
traverser.SubmitSuccess()
}
@@ -79,29 +80,33 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
continue
}
- isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey())
+ workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey())
item := new(bool)
wg.Add(1)
- go func() {
+ if err := workerPool.Submit(func() {
defer wg.Done()
- err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr})
+ err := f(ctx, nodeDesc{local: isLocal, info: addr})
if err != nil {
resErr.Store(err)
- svcutil.LogServiceError(ctx, n.cfg.Logger, "PUT", addr.Addresses(), err)
+ svcutil.LogServiceError(n.cfg.log, "PUT", addr.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
- }()
+ }); err != nil {
+ wg.Done()
+ svcutil.LogWorkerPoolError(n.cfg.log, "PUT", err)
+ return true
+ }
// Mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
- n.submitProcessed(addr, item)
+ n.traversal.submitProcessed(addr, item)
}
wg.Wait()
@@ -109,6 +114,6 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
return false
}
-func NeedAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool {
+func needAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool {
return len(obj.Children()) > 0 || (!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock))
}
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
new file mode 100644
index 000000000..509f4aee0
--- /dev/null
+++ b/pkg/services/object/put/distributed.go
@@ -0,0 +1,156 @@
+package putsvc
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+)
+
+type preparedObjectTarget interface {
+ WriteObject(context.Context, *objectSDK.Object, object.ContentMeta) error
+}
+
+type distributedTarget struct {
+ placementOpts []placement.Option
+ extraBroadcastEnabled bool
+
+ obj *objectSDK.Object
+ objMeta object.ContentMeta
+
+ *cfg
+
+ payload *payload
+
+ nodeTargetInitializer func(nodeDesc) preparedObjectTarget
+
+ relay func(context.Context, nodeDesc) error
+}
+
+// traversal holds the parameters and state of a container traversal.
+type traversal struct {
+ opts []placement.Option
+
+	// whether an additional container broadcast is needed after the object is saved
+ extraBroadcastEnabled bool
+
+	// container nodes that were processed during the primary object placement
+ mExclude map[string]*bool
+}
+
+// submitPrimaryPlacementFinish updates traversal parameters after the primary
+// placement finishes and returns true if an additional container broadcast is needed.
+func (x *traversal) submitPrimaryPlacementFinish() bool {
+ if x.extraBroadcastEnabled {
+ // do not track success during container broadcast (best-effort)
+ x.opts = append(x.opts, placement.WithoutSuccessTracking())
+
+ // avoid 2nd broadcast
+ x.extraBroadcastEnabled = false
+
+ return true
+ }
+
+ return false
+}
+
+// submitProcessed marks the container node as processed during the primary object placement.
+func (x *traversal) submitProcessed(n placement.Node, item *bool) {
+ if x.extraBroadcastEnabled {
+ key := string(n.PublicKey())
+
+ if x.mExclude == nil {
+ x.mExclude = make(map[string]*bool, 1)
+ }
+
+ x.mExclude[key] = item
+ }
+}
+
+type nodeDesc struct {
+ local bool
+
+ info placement.Node
+}
+
+// errIncompletePut is returned if processing on a container fails.
+type errIncompletePut struct {
+ singleErr error // error from the last responding node
+}
+
+func (x errIncompletePut) Error() string {
+ const commonMsg = "incomplete object PUT by placement"
+
+ if x.singleErr != nil {
+ return fmt.Sprintf("%s: %v", commonMsg, x.singleErr)
+ }
+
+ return commonMsg
+}
+
+func (t *distributedTarget) WriteHeader(_ context.Context, obj *objectSDK.Object) error {
+ t.obj = obj
+
+ return nil
+}
+
+func (t *distributedTarget) Write(_ context.Context, p []byte) (n int, err error) {
+ t.payload.Data = append(t.payload.Data, p...)
+
+ return len(p), nil
+}
+
+func (t *distributedTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
+ defer func() {
+ putPayload(t.payload)
+ t.payload = nil
+ }()
+
+ t.obj.SetPayload(t.payload.Data)
+
+ if err := t.WriteObject(ctx, t.obj); err != nil {
+ return nil, err
+ }
+
+ id, _ := t.obj.ID()
+ return &transformer.AccessIdentifiers{
+ SelfID: id,
+ }, nil
+}
+
+// WriteObject implements the transformer.ObjectWriter interface.
+func (t *distributedTarget) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+ t.obj = obj
+
+ var err error
+
+ if t.objMeta, err = t.fmtValidator.ValidateContent(t.obj); err != nil {
+ return fmt.Errorf("(%T) could not validate payload content: %w", t, err)
+ }
+ return t.iteratePlacement(ctx)
+}
+
+func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error {
+ if !node.local && t.relay != nil {
+ return t.relay(ctx, node)
+ }
+
+ target := t.nodeTargetInitializer(node)
+
+ err := target.WriteObject(ctx, t.obj, t.objMeta)
+ if err != nil {
+ return fmt.Errorf("could not write header: %w", err)
+ }
+ return nil
+}
+
+func (t *distributedTarget) iteratePlacement(ctx context.Context) error {
+ id, _ := t.obj.ID()
+
+ iter := t.cfg.newNodeIterator(append(t.placementOpts, placement.ForObject(id)))
+ iter.extraBroadcastEnabled = needAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */)
+ return iter.forEachNode(ctx, t.sendObject)
+}
diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/put/local.go
similarity index 68%
rename from pkg/services/object/common/writer/local.go
rename to pkg/services/object/put/local.go
index cf3d03275..54649adc7 100644
--- a/pkg/services/object/common/writer/local.go
+++ b/pkg/services/object/put/local.go
@@ -1,12 +1,10 @@
-package writer
+package putsvc
import (
"context"
"fmt"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -15,7 +13,7 @@ import (
type ObjectStorage interface {
// Put must save passed object
// and return any appeared error.
- Put(context.Context, *objectSDK.Object, bool) error
+ Put(context.Context, *objectSDK.Object) error
// Delete must delete passed objects
// and return any appeared error.
Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error
@@ -26,24 +24,19 @@ type ObjectStorage interface {
IsLocked(context.Context, oid.Address) (bool, error)
}
-type LocalTarget struct {
- Storage ObjectStorage
- Container containerSDK.Container
+type localTarget struct {
+ storage ObjectStorage
}
-func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
- if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
- return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
- }
-
+func (t localTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
switch meta.Type() {
case objectSDK.TypeTombstone:
- err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
+ err := t.storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
if err != nil {
return fmt.Errorf("could not delete objects from tombstone locally: %w", err)
}
case objectSDK.TypeLock:
- err := t.Storage.Lock(ctx, objectCore.AddressOf(obj), meta.Objects())
+ err := t.storage.Lock(ctx, objectCore.AddressOf(obj), meta.Objects())
if err != nil {
return fmt.Errorf("could not lock object from lock objects locally: %w", err)
}
@@ -51,5 +44,8 @@ func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met
// objects that do not change meta storage
}
+ if err := t.storage.Put(ctx, obj); err != nil {
+ return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
+ }
return nil
}
diff --git a/pkg/services/object/common/target/pool.go b/pkg/services/object/put/pool.go
similarity index 96%
rename from pkg/services/object/common/target/pool.go
rename to pkg/services/object/put/pool.go
index 71da305ad..ebe214caf 100644
--- a/pkg/services/object/common/target/pool.go
+++ b/pkg/services/object/put/pool.go
@@ -1,4 +1,4 @@
-package target
+package putsvc
import (
"sync"
diff --git a/pkg/services/object/common/writer/remote.go b/pkg/services/object/put/remote.go
similarity index 92%
rename from pkg/services/object/common/writer/remote.go
rename to pkg/services/object/put/remote.go
index 697613ff7..ee8d64e7a 100644
--- a/pkg/services/object/common/writer/remote.go
+++ b/pkg/services/object/put/remote.go
@@ -1,4 +1,4 @@
-package writer
+package putsvc
import (
"context"
@@ -16,7 +16,7 @@ import (
"google.golang.org/grpc/status"
)
-type remoteWriter struct {
+type remoteTarget struct {
privateKey *ecdsa.PrivateKey
commonPrm *util.CommonPrm
@@ -41,7 +41,7 @@ type RemotePutPrm struct {
obj *objectSDK.Object
}
-func (t *remoteWriter) WriteObject(ctx context.Context, obj *objectSDK.Object, _ objectcore.ContentMeta) error {
+func (t *remoteTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, _ objectcore.ContentMeta) error {
c, err := t.clientConstructor.Get(t.nodeInfo)
if err != nil {
return fmt.Errorf("(%T) could not create SDK client %s: %w", t, t.nodeInfo, err)
@@ -64,7 +64,7 @@ func (t *remoteWriter) WriteObject(ctx context.Context, obj *objectSDK.Object, _
return t.putStream(ctx, prm)
}
-func (t *remoteWriter) putStream(ctx context.Context, prm internalclient.PutObjectPrm) error {
+func (t *remoteTarget) putStream(ctx context.Context, prm internalclient.PutObjectPrm) error {
_, err := internalclient.PutObject(ctx, prm)
if err != nil {
return fmt.Errorf("(%T) could not put object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
@@ -72,7 +72,7 @@ func (t *remoteWriter) putStream(ctx context.Context, prm internalclient.PutObje
return nil
}
-func (t *remoteWriter) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) error {
+func (t *remoteTarget) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) error {
_, err := internalclient.PutObjectSingle(ctx, prm)
if err != nil {
return fmt.Errorf("(%T) could not put single object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
@@ -113,7 +113,7 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error {
return err
}
- t := &remoteWriter{
+ t := &remoteTarget{
privateKey: key,
clientConstructor: s.clientConstructor,
}
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index 7aeb5857d..3a7dcefd6 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -1,63 +1,131 @@
package putsvc
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
+type MaxSizeSource interface {
+	// MaxObjectSize returns the maximum payload size
+	// of a physically stored object in the system.
+	//
+	// Must return 0 if the value cannot be obtained.
+ MaxObjectSize() uint64
+}
+
type Service struct {
- *objectwriter.Config
+ *cfg
+}
+
+type Option func(*cfg)
+
+type ClientConstructor interface {
+ Get(client.NodeInfo) (client.MultiAddressClient, error)
+}
+
+type InnerRing interface {
+ InnerRingKeys() ([][]byte, error)
+}
+
+type FormatValidatorConfig interface {
+ VerifySessionTokenIssuer() bool
+}
+
+type cfg struct {
+ keyStorage *objutil.KeyStorage
+
+ maxSizeSrc MaxSizeSource
+
+ localStore ObjectStorage
+
+ cnrSrc container.Source
+
+ netMapSrc netmap.Source
+
+ remotePool, localPool util.WorkerPool
+
+ netmapKeys netmap.AnnouncedKeys
+
+ fmtValidator *object.FormatValidator
+
+ networkState netmap.State
+
+ clientConstructor ClientConstructor
+
+ log *logger.Logger
+
+ verifySessionTokenIssuer bool
}
func NewService(ks *objutil.KeyStorage,
- cc objectwriter.ClientConstructor,
- ms objectwriter.MaxSizeSource,
- os objectwriter.ObjectStorage,
+ cc ClientConstructor,
+ ms MaxSizeSource,
+ os ObjectStorage,
cs container.Source,
ns netmap.Source,
nk netmap.AnnouncedKeys,
nst netmap.State,
- ir objectwriter.InnerRing,
- opts ...objectwriter.Option,
-) *Service {
- c := &objectwriter.Config{
- Logger: logger.NewLoggerWrapper(zap.L()),
- KeyStorage: ks,
- ClientConstructor: cc,
- MaxSizeSrc: ms,
- LocalStore: os,
- ContainerSource: cs,
- NetmapSource: ns,
- NetmapKeys: nk,
- NetworkState: nst,
+ ir InnerRing,
+ opts ...Option) *Service {
+ c := &cfg{
+ remotePool: util.NewPseudoWorkerPool(),
+ localPool: util.NewPseudoWorkerPool(),
+ log: &logger.Logger{Logger: zap.L()},
+ keyStorage: ks,
+ clientConstructor: cc,
+ maxSizeSrc: ms,
+ localStore: os,
+ cnrSrc: cs,
+ netMapSrc: ns,
+ netmapKeys: nk,
+ networkState: nst,
}
for i := range opts {
opts[i](c)
}
- c.FormatValidator = object.NewFormatValidator(
+ c.fmtValidator = object.NewFormatValidator(
object.WithLockSource(os),
object.WithNetState(nst),
object.WithInnerRing(ir),
object.WithNetmapSource(ns),
object.WithContainersSource(cs),
- object.WithVerifySessionTokenIssuer(c.VerifySessionTokenIssuer),
- object.WithLogger(c.Logger),
+ object.WithVerifySessionTokenIssuer(c.verifySessionTokenIssuer),
+ object.WithLogger(c.log),
)
return &Service{
- Config: c,
+ cfg: c,
}
}
-func (s *Service) Put() (*Streamer, error) {
+func (p *Service) Put() (*Streamer, error) {
return &Streamer{
- Config: s.Config,
+ cfg: p.cfg,
}, nil
}
+
+func WithWorkerPools(remote, local util.WorkerPool) Option {
+ return func(c *cfg) {
+ c.remotePool, c.localPool = remote, local
+ }
+}
+
+func WithLogger(l *logger.Logger) Option {
+ return func(c *cfg) {
+ c.log = l
+ }
+}
+
+func WithVerifySessionTokenIssuer(v bool) Option {
+ return func(c *cfg) {
+ c.verifySessionTokenIssuer = v
+ }
+}
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 90f473254..3865aabb9 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -9,27 +9,22 @@ import (
"hash"
"sync"
+ objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
- objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
"go.opentelemetry.io/otel/attribute"
@@ -37,8 +32,6 @@ import (
"go.uber.org/zap"
)
-var errInvalidPayloadChecksum = errors.New("incorrect payload checksum")
-
type putSingleRequestSigner struct {
req *objectAPI.PutSingleRequest
keyStorage *svcutil.KeyStorage
@@ -86,7 +79,7 @@ func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest
}
func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
- if err := s.validarePutSingleSize(ctx, obj); err != nil {
+ if err := s.validarePutSingleSize(obj); err != nil {
return object.ContentMeta{}, err
}
@@ -97,14 +90,14 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object)
return s.validatePutSingleObject(ctx, obj)
}
-func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Object) error {
+func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error {
if uint64(len(obj.Payload())) != obj.PayloadSize() {
- return target.ErrWrongPayloadSize
+ return ErrWrongPayloadSize
}
- maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx)
+ maxAllowedSize := s.maxSizeSrc.MaxObjectSize()
if obj.PayloadSize() > maxAllowedSize {
- return target.ErrExceedingMaxSize
+ return ErrExceedingMaxSize
}
return nil
@@ -132,18 +125,18 @@ func (s *Service) validatePutSingleChecksum(obj *objectSDK.Object) error {
}
if !bytes.Equal(hash.Sum(nil), cs.Value()) {
- return errInvalidPayloadChecksum
+ return fmt.Errorf("incorrect payload checksum")
}
return nil
}
func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
- if err := s.FormatValidator.Validate(ctx, obj, false); err != nil {
+ if err := s.fmtValidator.Validate(ctx, obj, false); err != nil {
return object.ContentMeta{}, fmt.Errorf("coud not validate object format: %w", err)
}
- meta, err := s.FormatValidator.ValidateContent(obj)
+ meta, err := s.fmtValidator.ValidateContent(obj)
if err != nil {
return object.ContentMeta{}, fmt.Errorf("could not validate payload content: %w", err)
}
@@ -153,127 +146,71 @@ func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Ob
func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
localOnly := req.GetMetaHeader().GetTTL() <= 1
- placement, err := s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly)
+ placementOptions, err := s.getPutSinglePlacementOptions(obj, req.GetBody().GetCopiesNumber(), localOnly)
if err != nil {
return err
}
- if placement.isEC {
- return s.saveToECReplicas(ctx, placement, obj, req, meta)
- }
-
- return s.saveToREPReplicas(ctx, placement, obj, localOnly, req, meta)
-}
-
-func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
- iter := s.NewNodeIterator(placement.placementOptions)
- iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
- iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
+ iter := s.cfg.newNodeIterator(placementOptions)
+ iter.extraBroadcastEnabled = needAdditionalBroadcast(obj, localOnly)
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.KeyStorage,
+ keyStorage: s.keyStorage,
signer: &sync.Once{},
}
- return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor) error {
- return s.saveToPlacementNode(ctx, &nd, obj, signer, meta, placement.container)
+ return iter.forEachNode(ctx, func(ctx context.Context, nd nodeDesc) error {
+ return s.saveToPlacementNode(ctx, &nd, obj, signer, meta)
})
}
-func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
- commonPrm, err := svcutil.CommonPrmFromV2(req)
- if err != nil {
- return err
+func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) ([]placement.Option, error) {
+ var result []placement.Option
+ if len(copiesNumber) > 0 {
+ result = append(result, placement.WithCopyNumbers(copiesNumber))
}
- key, err := s.KeyStorage.GetKey(nil)
- if err != nil {
- return err
- }
- signer := &putSingleRequestSigner{
- req: req,
- keyStorage: s.KeyStorage,
- signer: &sync.Once{},
- }
-
- w := objectwriter.ECWriter{
- Config: s.Config,
- PlacementOpts: placement.placementOptions,
- ObjectMeta: meta,
- ObjectMetaValid: true,
- CommonPrm: commonPrm,
- Container: placement.container,
- Key: key,
- Relay: func(ctx context.Context, ni client.NodeInfo, mac client.MultiAddressClient) error {
- return s.redirectPutSingleRequest(ctx, signer, obj, ni, mac)
- },
- }
- return w.WriteObject(ctx, obj)
-}
-
-type putSinglePlacement struct {
- placementOptions []placement.Option
- isEC bool
- container containerSDK.Container
- resetSuccessAfterOnBroadcast bool
-}
-
-func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
- var result putSinglePlacement
cnrID, ok := obj.ContainerID()
if !ok {
- return result, errors.New("missing container ID")
+ return nil, errors.New("missing container ID")
}
- cnrInfo, err := s.ContainerSource.Get(ctx, cnrID)
+ cnrInfo, err := s.cnrSrc.Get(cnrID)
if err != nil {
- return result, fmt.Errorf("could not get container by ID: %w", err)
+ return nil, fmt.Errorf("could not get container by ID: %w", err)
}
- result.container = cnrInfo.Value
- result.isEC = container.IsECContainer(cnrInfo.Value) && object.IsECSupported(obj)
- if len(copiesNumber) > 0 && !result.isEC {
- result.placementOptions = append(result.placementOptions, placement.WithCopyNumbers(copiesNumber))
- }
- if container.IsECContainer(cnrInfo.Value) && !object.IsECSupported(obj) && !localOnly {
- result.placementOptions = append(result.placementOptions, placement.SuccessAfter(uint32(policy.ECParityCount(cnrInfo.Value.PlacementPolicy())+1)))
- result.resetSuccessAfterOnBroadcast = true
- }
- result.placementOptions = append(result.placementOptions, placement.ForContainer(cnrInfo.Value))
+ result = append(result, placement.ForContainer(cnrInfo.Value))
objID, ok := obj.ID()
if !ok {
- return result, errors.New("missing object ID")
+ return nil, errors.New("missing object ID")
}
- if obj.ECHeader() != nil {
- objID = obj.ECHeader().Parent()
- }
- result.placementOptions = append(result.placementOptions, placement.ForObject(objID))
+ result = append(result, placement.ForObject(objID))
- latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource)
+ latestNetmap, err := netmap.GetLatestNetworkMap(s.netMapSrc)
if err != nil {
- return result, fmt.Errorf("could not get latest network map: %w", err)
+ return nil, fmt.Errorf("could not get latest network map: %w", err)
}
builder := placement.NewNetworkMapBuilder(latestNetmap)
if localOnly {
- result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
- builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys)
+ result = append(result, placement.SuccessAfter(1))
+ builder = svcutil.NewLocalPlacement(builder, s.netmapKeys)
}
- result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
+ result = append(result, placement.UseBuilder(builder))
return result, nil
}
-func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object,
- signer *putSingleRequestSigner, meta object.ContentMeta, container containerSDK.Container,
-) error {
- if nodeDesc.Local {
- return s.saveLocal(ctx, obj, meta, container)
+func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *nodeDesc, obj *objectSDK.Object,
+ signer *putSingleRequestSigner, meta object.ContentMeta) error {
+ if nodeDesc.local {
+ return s.saveLocal(ctx, obj, meta)
}
var info client.NodeInfo
- client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)
+ client.NodeInfoFromNetmapElement(&info, nodeDesc.info)
- c, err := s.ClientConstructor.Get(info)
+ c, err := s.clientConstructor.Get(info)
if err != nil {
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
@@ -281,10 +218,9 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
return s.redirectPutSingleRequest(ctx, signer, obj, info, c)
}
-func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
- localTarget := &objectwriter.LocalTarget{
- Storage: s.LocalStore,
- Container: container,
+func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta) error {
+ localTarget := &localTarget{
+ storage: s.localStore,
}
return localTarget.WriteObject(ctx, obj, meta)
}
@@ -293,8 +229,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
signer *putSingleRequestSigner,
obj *objectSDK.Object,
info client.NodeInfo,
- c client.MultiAddressClient,
-) error {
+ c client.MultiAddressClient) error {
ctx, span := tracing.StartSpanFromContext(ctx, "putService.redirectPutSingleRequest")
defer span.End()
@@ -317,11 +252,12 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
if err != nil {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
- s.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
+ s.log.Warn(logs.PutSingleRedirectFailure,
zap.Error(err),
zap.Stringer("address", addr),
zap.Stringer("object_id", objID),
zap.Stringer("container_id", cnrID),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
@@ -350,12 +286,8 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
err = signature.VerifyServiceMessage(resp)
if err != nil {
err = fmt.Errorf("response verification failed: %w", err)
- return
}
- st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus())
- err = apistatus.ErrFromStatus(st)
-
return
})
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 19768b7fa..10f932849 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -2,18 +2,31 @@ package putsvc
import (
"context"
+ "crypto/ecdsa"
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
- objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ pkgutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
type Streamer struct {
- *objectwriter.Config
+ *cfg
+
+ sessionKey *ecdsa.PrivateKey
target transformer.ChunkedObjectWriter
+
+ relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
+
+ maxPayloadSz uint64 // network config
}
var errNotInit = errors.New("stream not initialized")
@@ -21,23 +34,8 @@ var errNotInit = errors.New("stream not initialized")
var errInitRecall = errors.New("init recall")
func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
- if p.target != nil {
- return errInitRecall
- }
-
// initialize destination target
- prmTarget := objectwriter.Params{
- Config: p.Config,
- Common: prm.common,
- Header: prm.hdr,
- Container: prm.cnr,
- TraverseOpts: prm.traverseOpts,
- Relay: prm.relay,
- }
-
- var err error
- p.target, err = target.New(ctx, prmTarget)
- if err != nil {
+ if err := p.initTarget(prm); err != nil {
return fmt.Errorf("(%T) could not initialize object target: %w", p, err)
}
@@ -47,6 +45,201 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
return nil
}
+// MaxObjectSize returns the maximum payload size for the streaming session.
+//
+// Must be called after a successful Init.
+func (p *Streamer) MaxObjectSize() uint64 {
+ return p.maxPayloadSz
+}
+
+func (p *Streamer) initTarget(prm *PutInitPrm) error {
+ // prevent re-calling
+ if p.target != nil {
+ return errInitRecall
+ }
+
+ // prepare needed put parameters
+ if err := p.preparePrm(prm); err != nil {
+ return fmt.Errorf("(%T) could not prepare put parameters: %w", p, err)
+ }
+
+ p.maxPayloadSz = p.maxSizeSrc.MaxObjectSize()
+ if p.maxPayloadSz == 0 {
+ return fmt.Errorf("(%T) could not obtain max object size parameter", p)
+ }
+
+ if prm.hdr.Signature() != nil {
+ return p.initUntrustedTarget(prm)
+ }
+ return p.initTrustedTarget(prm)
+}
+
+func (p *Streamer) initUntrustedTarget(prm *PutInitPrm) error {
+ p.relay = prm.relay
+
+ // prepare untrusted-Put object target
+ p.target = &validatingPreparedTarget{
+ nextTarget: p.newCommonTarget(prm),
+ fmt: p.fmtValidator,
+
+ maxPayloadSz: p.maxPayloadSz,
+ }
+
+ return nil
+}
+
+func (p *Streamer) initTrustedTarget(prm *PutInitPrm) error {
+ sToken := prm.common.SessionToken()
+
+ // prepare trusted-Put object target
+
+ // get private token from local storage
+ var sessionInfo *util.SessionInfo
+
+ if sToken != nil {
+ sessionInfo = &util.SessionInfo{
+ ID: sToken.ID(),
+ Owner: sToken.Issuer(),
+ }
+ }
+
+ sessionKey, err := p.keyStorage.GetKey(sessionInfo)
+ if err != nil {
+ return fmt.Errorf("(%T) could not receive session key: %w", p, err)
+ }
+
+ // In case session token is missing, the line above returns the default key.
+ // If it isn't owner key, replication attempts will fail, thus this check.
+ if sToken == nil {
+ ownerObj := prm.hdr.OwnerID()
+ if ownerObj == nil {
+ return errors.New("missing object owner")
+ }
+
+ var ownerSession user.ID
+ user.IDFromKey(&ownerSession, sessionKey.PublicKey)
+
+ if !ownerObj.Equals(ownerSession) {
+ return fmt.Errorf("(%T) session token is missing but object owner id is different from the default key", p)
+ }
+ }
+
+ p.sessionKey = sessionKey
+ p.target = &validatingTarget{
+ fmt: p.fmtValidator,
+ nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{
+ Key: sessionKey,
+ NextTargetInit: func() transformer.ObjectWriter { return p.newCommonTarget(prm) },
+ NetworkState: p.networkState,
+ MaxSize: p.maxPayloadSz,
+ WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.cnr),
+ SessionToken: sToken,
+ }),
+ }
+
+ return nil
+}
+
+func (p *Streamer) preparePrm(prm *PutInitPrm) error {
+ var err error
+
+ // get latest network map
+ nm, err := netmap.GetLatestNetworkMap(p.netMapSrc)
+ if err != nil {
+ return fmt.Errorf("(%T) could not get latest network map: %w", p, err)
+ }
+
+ idCnr, ok := prm.hdr.ContainerID()
+ if !ok {
+ return errors.New("missing container ID")
+ }
+
+ // get container to store the object
+ cnrInfo, err := p.cnrSrc.Get(idCnr)
+ if err != nil {
+ return fmt.Errorf("(%T) could not get container by ID: %w", p, err)
+ }
+
+ prm.cnr = cnrInfo.Value
+
+ // add common options
+ prm.traverseOpts = append(prm.traverseOpts,
+ // set processing container
+ placement.ForContainer(prm.cnr),
+ )
+
+ if id, ok := prm.hdr.ID(); ok {
+ prm.traverseOpts = append(prm.traverseOpts,
+ // set identifier of the processing object
+ placement.ForObject(id),
+ )
+ }
+
+ // create placement builder from network map
+ builder := placement.NewNetworkMapBuilder(nm)
+
+ if prm.common.LocalOnly() {
+ // restrict success count to 1 stored copy (to local storage)
+ prm.traverseOpts = append(prm.traverseOpts, placement.SuccessAfter(1))
+
+ // use local-only placement builder
+ builder = util.NewLocalPlacement(builder, p.netmapKeys)
+ }
+
+ // set placement builder
+ prm.traverseOpts = append(prm.traverseOpts, placement.UseBuilder(builder))
+
+ return nil
+}
+
+func (p *Streamer) newCommonTarget(prm *PutInitPrm) *distributedTarget {
+ var relay func(context.Context, nodeDesc) error
+ if p.relay != nil {
+ relay = func(ctx context.Context, node nodeDesc) error {
+ var info client.NodeInfo
+
+ client.NodeInfoFromNetmapElement(&info, node.info)
+
+ c, err := p.clientConstructor.Get(info)
+ if err != nil {
+ return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
+ }
+
+ return p.relay(ctx, info, c)
+ }
+ }
+
+ // enable additional container broadcast on non-local operation
+ // if object has TOMBSTONE or LOCK type.
+ typ := prm.hdr.Type()
+ withBroadcast := !prm.common.LocalOnly() && (typ == objectSDK.TypeTombstone || typ == objectSDK.TypeLock)
+
+ return &distributedTarget{
+ cfg: p.cfg,
+ placementOpts: prm.traverseOpts,
+ extraBroadcastEnabled: withBroadcast,
+ payload: getPayload(),
+ nodeTargetInitializer: func(node nodeDesc) preparedObjectTarget {
+ if node.local {
+ return localTarget{
+ storage: p.localStore,
+ }
+ }
+
+ rt := &remoteTarget{
+ privateKey: p.sessionKey,
+ commonPrm: prm.common,
+ clientConstructor: p.clientConstructor,
+ }
+
+ client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.info)
+
+ return rt
+ },
+ relay: relay,
+ }
+}
+
func (p *Streamer) SendChunk(ctx context.Context, prm *PutChunkPrm) error {
if p.target == nil {
return errNotInit
@@ -80,3 +273,10 @@ func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) {
id: ids.SelfID,
}, nil
}
+
+func (c *cfg) getWorkerPool(pub []byte) (pkgutil.WorkerPool, bool) {
+ if c.netmapKeys.IsLocalKey(pub) {
+ return c.localPool, true
+ }
+ return c.remotePool, false
+}
diff --git a/pkg/services/object/put/v2/service.go b/pkg/services/object/put/v2/service.go
index 78d4c711d..db902ae59 100644
--- a/pkg/services/object/put/v2/service.go
+++ b/pkg/services/object/put/v2/service.go
@@ -4,10 +4,10 @@ import (
"context"
"fmt"
+ objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Put operation of Object service v2.
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index f0c648187..9c6de4ca8 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -4,19 +4,18 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -56,15 +55,15 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
s.saveChunks = v.GetSignature() != nil
if s.saveChunks {
- maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx)
+ maxSz := s.stream.MaxObjectSize()
s.sizes = &sizes{
- payloadSz: v.GetHeader().GetPayloadLength(),
+ payloadSz: uint64(v.GetHeader().GetPayloadLength()),
}
// check payload size limit overflow
if s.payloadSz > maxSz {
- return target.ErrExceedingMaxSize
+ return putsvc.ErrExceedingMaxSize
}
s.init = req
@@ -75,7 +74,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
// check payload size overflow
if s.writtenPayload > s.payloadSz {
- return target.ErrWrongPayloadSize
+ return putsvc.ErrWrongPayloadSize
}
}
@@ -118,7 +117,7 @@ func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error
if s.saveChunks {
// check payload size correctness
if s.writtenPayload != s.payloadSz {
- return nil, target.ErrWrongPayloadSize
+ return nil, putsvc.ErrWrongPayloadSize
}
}
diff --git a/pkg/services/object/put/v2/util.go b/pkg/services/object/put/v2/util.go
index 5ec9ebe10..a157a9542 100644
--- a/pkg/services/object/put/v2/util.go
+++ b/pkg/services/object/put/v2/util.go
@@ -1,10 +1,10 @@
package putsvc
import (
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
diff --git a/pkg/services/object/common/target/validation.go b/pkg/services/object/put/validation.go
similarity index 99%
rename from pkg/services/object/common/target/validation.go
rename to pkg/services/object/put/validation.go
index b29721d01..c2b078ef5 100644
--- a/pkg/services/object/common/target/validation.go
+++ b/pkg/services/object/put/validation.go
@@ -1,4 +1,4 @@
-package target
+package putsvc
import (
"bytes"
diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go
deleted file mode 100644
index 01eb1ea8d..000000000
--- a/pkg/services/object/qos.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package object
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-)
-
-var _ ServiceServer = (*qosObjectService)(nil)
-
-type AdjustIOTag interface {
- AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
-}
-
-type qosObjectService struct {
- next ServiceServer
- adj AdjustIOTag
-}
-
-func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer {
- return &qosObjectService{
- next: next,
- adj: adjIOTag,
- }
-}
-
-func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Delete(ctx, req)
-}
-
-func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error {
- ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Get(req, &qosReadStream[*object.GetResponse]{
- ctxF: func() context.Context { return ctx },
- sender: s,
- })
-}
-
-func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error {
- ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{
- ctxF: func() context.Context { return ctx },
- sender: s,
- })
-}
-
-func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.GetRangeHash(ctx, req)
-}
-
-func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Head(ctx, req)
-}
-
-func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) {
- s, err := q.next.Patch(ctx)
- if err != nil {
- return nil, err
- }
- return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{
- s: s,
- adj: q.adj,
- }, nil
-}
-
-func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) {
- s, err := q.next.Put(ctx)
- if err != nil {
- return nil, err
- }
- return &qosWriteStream[*object.PutRequest, *object.PutResponse]{
- s: s,
- adj: q.adj,
- }, nil
-}
-
-func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.PutSingle(ctx, req)
-}
-
-func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error {
- ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
- return q.next.Search(req, &qosReadStream[*object.SearchResponse]{
- ctxF: func() context.Context { return ctx },
- sender: s,
- })
-}
-
-type qosSend[T any] interface {
- Send(T) error
-}
-
-type qosReadStream[T any] struct {
- sender qosSend[T]
- ctxF func() context.Context
-}
-
-func (g *qosReadStream[T]) Context() context.Context {
- return g.ctxF()
-}
-
-func (g *qosReadStream[T]) Send(resp T) error {
- return g.sender.Send(resp)
-}
-
-type qosVerificationHeader interface {
- GetVerificationHeader() *session.RequestVerificationHeader
-}
-
-type qosSendRecv[TReq qosVerificationHeader, TResp any] interface {
- Send(context.Context, TReq) error
- CloseAndRecv(context.Context) (TResp, error)
-}
-
-type qosWriteStream[TReq qosVerificationHeader, TResp any] struct {
- s qosSendRecv[TReq, TResp]
- adj AdjustIOTag
-
- ioTag string
- ioTagDefined bool
-}
-
-func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) {
- if q.ioTagDefined {
- ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
- }
- return q.s.CloseAndRecv(ctx)
-}
-
-func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error {
- if !q.ioTagDefined {
- ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
- q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx)
- }
- assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment")
- ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
- return q.s.Send(ctx, req)
-}
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index 80c971e8f..a10f26a34 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -4,8 +4,8 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type ResponseService struct {
@@ -37,11 +37,6 @@ type putStreamResponser struct {
respSvc *response.Service
}
-type patchStreamResponser struct {
- stream PatchObjectStream
- respSvc *response.Service
-}
-
// NewResponseService returns object service instance that passes internal service
// call to response service.
func NewResponseService(objSvc ServiceServer, respSvc *response.Service) *ResponseService {
@@ -80,8 +75,8 @@ func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutRespo
return r, nil
}
-func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *ResponseService) Put() (PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
@@ -92,35 +87,6 @@ func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
}, nil
}
-func (s *patchStreamResponser) Send(ctx context.Context, req *object.PatchRequest) error {
- if err := s.stream.Send(ctx, req); err != nil {
- return fmt.Errorf("could not send the request: %w", err)
- }
- return nil
-}
-
-func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
- r, err := s.stream.CloseAndRecv(ctx)
- if err != nil {
- return nil, fmt.Errorf("could not close stream and receive response: %w", err)
- }
-
- s.respSvc.SetMeta(r)
- return r, nil
-}
-
-func (s *ResponseService) Patch(ctx context.Context) (PatchObjectStream, error) {
- stream, err := s.svc.Patch(ctx)
- if err != nil {
- return nil, fmt.Errorf("could not create Put object streamer: %w", err)
- }
-
- return &patchStreamResponser{
- stream: stream,
- respSvc: s.respSvc,
- }, nil
-}
-
func (s *ResponseService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
resp, err := s.svc.PutSingle(ctx, req)
if err != nil {
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index 60d469b11..a8865f5f0 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -8,19 +8,18 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"go.uber.org/zap"
)
func (exec *execCtx) executeOnContainer(ctx context.Context) error {
lookupDepth := exec.netmapLookupDepth()
- exec.log.Debug(ctx, logs.TryingToExecuteInContainer,
+ exec.log.Debug(logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
// initialize epoch number
- if err := exec.initEpoch(ctx); err != nil {
+ if err := exec.initEpoch(); err != nil {
return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err)
}
@@ -44,11 +43,11 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error {
}
func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
- exec.log.Debug(ctx, logs.ProcessEpoch,
+ exec.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
- traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch)
+ traverser, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch)
if err != nil {
return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err)
}
@@ -59,7 +58,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
+ exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
break
}
@@ -72,8 +71,8 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
defer wg.Done()
select {
case <-ctx.Done():
- exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
- zap.Error(ctx.Err()))
+ exec.log.Debug(logs.InterruptPlacementIterationByContext,
+ zap.String("error", ctx.Err().Error()))
return
default:
}
@@ -82,18 +81,18 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
client.NodeInfoFromNetmapElement(&info, addrs[i])
- exec.log.Debug(ctx, logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
+ exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
c, err := exec.svc.clientConstructor.get(info)
if err != nil {
- exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err))
+ exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error()))
return
}
ids, err := c.searchObjects(ctx, exec, info)
if err != nil {
- exec.log.Debug(ctx, logs.SearchRemoteOperationFailed,
- zap.Error(err))
+ exec.log.Debug(logs.SearchRemoteOperationFailed,
+ zap.String("error", err.Error()))
return
}
@@ -102,7 +101,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
err = exec.writeIDList(ids)
mtx.Unlock()
if err != nil {
- exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err))
+ exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()))
return
}
}(i)
@@ -113,12 +112,3 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
return nil
}
-
-func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) {
- cnrID := exec.containerID()
- cnr, err := exec.svc.containerSource.Get(ctx, cnrID)
- if err != nil {
- return containerSDK.Container{}, err
- }
- return cnr.Value, nil
-}
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index ced51ecce..2e0d48773 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -1,8 +1,6 @@
package searchsvc
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -21,16 +19,16 @@ type execCtx struct {
}
func (exec *execCtx) setLogger(l *logger.Logger) {
- exec.log = l.With(
+ exec.log = &logger.Logger{Logger: l.With(
zap.String("request", "SEARCH"),
zap.Stringer("container", exec.containerID()),
zap.Bool("local", exec.isLocal()),
zap.Bool("with session", exec.prm.common.SessionToken() != nil),
zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
- )
+ )}
}
-func (exec *execCtx) isLocal() bool {
+func (exec execCtx) isLocal() bool {
return exec.prm.common.LocalOnly()
}
@@ -50,13 +48,13 @@ func (exec *execCtx) netmapLookupDepth() uint64 {
return exec.prm.common.NetmapLookupDepth()
}
-func (exec *execCtx) initEpoch(ctx context.Context) error {
+func (exec *execCtx) initEpoch() error {
exec.curProcEpoch = exec.netmapEpoch()
if exec.curProcEpoch > 0 {
return nil
}
- e, err := exec.svc.currentEpochReceiver.Epoch(ctx)
+ e, err := exec.svc.currentEpochReceiver.Epoch()
if err != nil {
return err
}
diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go
index ec65ab06a..cfaed13b8 100644
--- a/pkg/services/object/search/local.go
+++ b/pkg/services/object/search/local.go
@@ -11,7 +11,7 @@ import (
func (exec *execCtx) executeLocal(ctx context.Context) error {
ids, err := exec.svc.localStorage.search(ctx, exec)
if err != nil {
- exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.Error(err))
+ exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error()))
return err
}
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 76c091f85..4a5c414d5 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -20,26 +20,26 @@ func (s *Service) Search(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) error {
- exec.log.Debug(ctx, logs.ServingRequest)
+ exec.log.Debug(logs.ServingRequest)
err := exec.executeLocal(ctx)
- exec.logResult(ctx, err)
+ exec.logResult(err)
if exec.isLocal() {
- exec.log.Debug(ctx, logs.SearchReturnResultDirectly)
+ exec.log.Debug(logs.SearchReturnResultDirectly)
return err
}
err = exec.executeOnContainer(ctx)
- exec.logResult(ctx, err)
+ exec.logResult(err)
return err
}
-func (exec *execCtx) logResult(ctx context.Context, err error) {
+func (exec *execCtx) logResult(err error) {
switch {
default:
- exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
+ exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
case err == nil:
- exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
}
}
diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go
index 918ad421f..09d98eff2 100644
--- a/pkg/services/object/search/search_test.go
+++ b/pkg/services/object/search/search_test.go
@@ -6,18 +6,16 @@ import (
"crypto/sha256"
"errors"
"fmt"
- "slices"
"strconv"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
@@ -59,7 +57,7 @@ type simpleIDWriter struct {
type testEpochReceiver uint64
-func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) {
+func (e testEpochReceiver) Epoch() (uint64, error) {
return uint64(e), nil
}
@@ -82,16 +80,15 @@ func newTestStorage() *testStorage {
}
}
-func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) {
- t, err := placement.NewTraverser(context.Background(),
+func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, error) {
+ return placement.NewTraverser(
placement.ForContainer(g.c),
placement.UseBuilder(g.b[epoch]),
placement.WithoutSuccessTracking(),
)
- return t, &containerCore.Container{Value: g.c}, err
}
-func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
var addr oid.Address
addr.SetContainer(cnr)
@@ -104,7 +101,8 @@ func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, o
return nil, errors.New("vectors for address not found")
}
- res := slices.Clone(vs)
+ res := make([][]netmap.NodeInfo, len(vs))
+ copy(res, vs)
return res, nil
}
@@ -151,7 +149,7 @@ func testSHA256() (cs [sha256.Size]byte) {
func generateIDs(num int) []oid.ID {
res := make([]oid.ID, num)
- for i := range num {
+ for i := 0; i < num; i++ {
res[i].SetSHA256(testSHA256())
}
@@ -163,7 +161,7 @@ func TestGetLocalOnly(t *testing.T) {
newSvc := func(storage *testStorage) *Service {
svc := &Service{cfg: new(cfg)}
- svc.log = test.NewLogger(t)
+ svc.log = test.NewLogger(t, true)
svc.localStorage = storage
return svc
@@ -232,7 +230,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
ns := make([]netmap.NodeInfo, dim[i])
as := make([]string, dim[i])
- for j := range dim[i] {
+ for j := 0; j < dim[i]; j++ {
a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
strconv.Itoa(i),
strconv.Itoa(60000+j),
@@ -279,7 +277,7 @@ func TestGetRemoteSmall(t *testing.T) {
newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
svc := &Service{cfg: new(cfg)}
- svc.log = test.NewLogger(t)
+ svc.log = test.NewLogger(t, true)
svc.localStorage = newTestStorage()
const curEpoch = 13
@@ -432,7 +430,7 @@ func TestGetFromPastEpoch(t *testing.T) {
c22.addResult(idCnr, ids22, nil)
svc := &Service{cfg: new(cfg)}
- svc.log = test.NewLogger(t)
+ svc.log = test.NewLogger(t, true)
svc.localStorage = newTestStorage()
const curEpoch = 13
@@ -545,7 +543,7 @@ func TestGetWithSessionToken(t *testing.T) {
w := new(simpleIDWriter)
svc := &Service{cfg: new(cfg)}
- svc.log = test.NewLogger(t)
+ svc.log = test.NewLogger(t, true)
svc.localStorage = localStorage
const curEpoch = 13
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index 56fe56468..863312200 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -4,7 +4,6 @@ import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -46,16 +45,14 @@ type cfg struct {
}
traverserGenerator interface {
- GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error)
+ GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, error)
}
currentEpochReceiver interface {
- Epoch(ctx context.Context) (uint64, error)
+ Epoch() (uint64, error)
}
keyStore *util.KeyStorage
-
- containerSource container.Source
}
// New creates, initializes and returns utility serving
@@ -65,11 +62,9 @@ func New(e *engine.StorageEngine,
tg *util.TraverserGenerator,
ns netmap.Source,
ks *util.KeyStorage,
- cs container.Source,
- opts ...Option,
-) *Service {
+ opts ...Option) *Service {
c := &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
clientConstructor: &clientConstructorWrapper{
constructor: cc,
},
@@ -79,7 +74,6 @@ func New(e *engine.StorageEngine,
traverserGenerator: tg,
currentEpochReceiver: ns,
keyStore: ks,
- containerSource: cs,
}
for i := range opts {
@@ -94,6 +88,6 @@ func New(e *engine.StorageEngine,
// WithLogger returns option to specify Get service's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
- c.log = l
+ c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Search service"))}
}
}
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 0be5345b9..67b6c0d01 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -2,11 +2,9 @@ package searchsvc
import (
"context"
- "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -54,7 +52,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error {
}
// exclude processed address
- list = slices.Delete(list, i, i+1)
+ list = append(list[:i], list[i+1:]...)
i--
}
@@ -114,13 +112,9 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c
}
func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
- cnr, err := exec.getContainer(ctx)
- if err != nil {
- return nil, err
- }
var selectPrm engine.SelectPrm
selectPrm.WithFilters(exec.searchFilters())
- selectPrm.WithContainerID(exec.containerID(), container.IsIndexedContainer(cnr))
+ selectPrm.WithContainerID(exec.containerID())
r, err := e.storage.Select(ctx, selectPrm)
if err != nil {
diff --git a/pkg/services/object/search/v2/request_forwarder.go b/pkg/services/object/search/v2/request_forwarder.go
index 7bb6e4d3c..5a2e9b936 100644
--- a/pkg/services/object/search/v2/request_forwarder.go
+++ b/pkg/services/object/search/v2/request_forwarder.go
@@ -8,14 +8,14 @@ import (
"io"
"sync"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/search/v2/service.go b/pkg/services/object/search/v2/service.go
index 856cd9f04..78b72ac79 100644
--- a/pkg/services/object/search/v2/service.go
+++ b/pkg/services/object/search/v2/service.go
@@ -1,10 +1,10 @@
package searchsvc
import (
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// Service implements Search operation of Object service v2.
diff --git a/pkg/services/object/search/v2/streamer.go b/pkg/services/object/search/v2/streamer.go
index 93b281343..15e2d53d5 100644
--- a/pkg/services/object/search/v2/streamer.go
+++ b/pkg/services/object/search/v2/streamer.go
@@ -1,9 +1,9 @@
package searchsvc
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
diff --git a/pkg/services/object/search/v2/util.go b/pkg/services/object/search/v2/util.go
index 48ae98958..e971fa8e5 100644
--- a/pkg/services/object/search/v2/util.go
+++ b/pkg/services/object/search/v2/util.go
@@ -5,12 +5,12 @@ import (
"errors"
"fmt"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index e65293977..73b88f233 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -3,8 +3,8 @@ package object
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
// GetObjectStream is an interface of FrostFS API v2 compatible object streamer.
@@ -31,18 +31,11 @@ type PutObjectStream interface {
CloseAndRecv(context.Context) (*object.PutResponse, error)
}
-// PatchObjectStream is an interface of FrostFS API v2 compatible patch streamer.
-type PatchObjectStream interface {
- Send(context.Context, *object.PatchRequest) error
- CloseAndRecv(context.Context) (*object.PatchResponse, error)
-}
-
// ServiceServer is an interface of utility
// serving v2 Object service.
type ServiceServer interface {
Get(*object.GetRequest, GetObjectStream) error
- Put(context.Context) (PutObjectStream, error)
- Patch(context.Context) (PatchObjectStream, error)
+ Put() (PutObjectStream, error)
Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
Search(*object.SearchRequest, SearchStream) error
Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index fd8e926dd..faad26489 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -5,11 +5,13 @@ import (
"crypto/ecdsa"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type SignService struct {
+ key *ecdsa.PrivateKey
+
sigSvc *util.SignService
svc ServiceServer
@@ -33,12 +35,6 @@ type putStreamSigner struct {
err error
}
-type patchStreamSigner struct {
- sigSvc *util.SignService
- stream PatchObjectStream
- err error
-}
-
type getRangeStreamSigner struct {
GetObjectRangeStream
sigSvc *util.SignService
@@ -46,6 +42,7 @@ type getRangeStreamSigner struct {
func NewSignService(key *ecdsa.PrivateKey, svc ServiceServer) *SignService {
return &SignService{
+ key: key,
sigSvc: util.NewUnarySignService(key),
svc: svc,
}
@@ -96,16 +93,15 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes
} else {
resp, err = s.stream.CloseAndRecv(ctx)
if err != nil {
- err = fmt.Errorf("could not close stream and receive response: %w", err)
- resp = new(object.PutResponse)
+ return nil, fmt.Errorf("could not close stream and receive response: %w", err)
}
}
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *SignService) Put() (PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
@@ -116,43 +112,6 @@ func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
}, nil
}
-func (s *patchStreamSigner) Send(ctx context.Context, req *object.PatchRequest) error {
- if s.err = s.sigSvc.VerifyRequest(req); s.err != nil {
- return util.ErrAbortStream
- }
- if s.err = s.stream.Send(ctx, req); s.err != nil {
- return util.ErrAbortStream
- }
- return nil
-}
-
-func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PatchResponse, err error) {
- if s.err != nil {
- err = s.err
- resp = new(object.PatchResponse)
- } else {
- resp, err = s.stream.CloseAndRecv(ctx)
- if err != nil {
- err = fmt.Errorf("could not close stream and receive response: %w", err)
- resp = new(object.PatchResponse)
- }
- }
-
- return resp, s.sigSvc.SignResponse(resp, err)
-}
-
-func (s *SignService) Patch(ctx context.Context) (PatchObjectStream, error) {
- stream, err := s.svc.Patch(ctx)
- if err != nil {
- return nil, fmt.Errorf("could not create Put object streamer: %w", err)
- }
-
- return &patchStreamSigner{
- stream: stream,
- sigSvc: s.sigSvc,
- }, nil
-}
-
func (s *SignService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
if err := s.sigSvc.VerifyRequest(req); err != nil {
resp := new(object.HeadResponse)
@@ -163,7 +122,6 @@ func (s *SignService) Head(ctx context.Context, req *object.HeadRequest) (*objec
}
func (s *SignService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
- req.GetBody().SetMarshalData(req.GetBody().StableMarshal(nil))
if err := s.sigSvc.VerifyRequest(req); err != nil {
resp := new(object.PutSingleResponse)
return resp, s.sigSvc.SignResponse(resp, err)
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index b446d3605..2d9810cd3 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -4,8 +4,8 @@ import (
"bytes"
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
)
type (
@@ -87,12 +87,8 @@ func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream)
})
}
-func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) {
- return c.next.Put(ctx)
-}
-
-func (c TransportSplitter) Patch(ctx context.Context) (PatchObjectStream, error) {
- return c.next.Patch(ctx)
+func (c TransportSplitter) Put() (PutObjectStream, error) {
+ return c.next.Put()
}
func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
@@ -162,13 +158,16 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error {
var newResp *object.SearchResponse
- for {
+ for ln := uint64(len(ids)); ; {
if newResp == nil {
newResp = new(object.SearchResponse)
newResp.SetBody(body)
}
- cut := min(s.addrAmount, uint64(len(ids)))
+ cut := s.addrAmount
+ if cut > ln {
+ cut = ln
+ }
body.SetIDList(ids[:cut])
newResp.SetMetaHeader(resp.GetMetaHeader())
diff --git a/pkg/services/object/util/key.go b/pkg/services/object/util/key.go
index 23d6c1c68..8304bf13a 100644
--- a/pkg/services/object/util/key.go
+++ b/pkg/services/object/util/key.go
@@ -67,7 +67,7 @@ func (s *KeyStorage) GetKey(info *SessionInfo) (*ecdsa.PrivateKey, error) {
pToken := s.tokenStore.Get(info.Owner, binID)
if pToken != nil {
- if pToken.ExpiredAt() < s.networkState.CurrentEpoch() {
+ if pToken.ExpiredAt() <= s.networkState.CurrentEpoch() {
return nil, new(apistatus.SessionTokenExpired)
}
return pToken.SessionKey(), nil
diff --git a/pkg/services/object/util/key_test.go b/pkg/services/object/util/key_test.go
index 1753a26f7..76e320e0c 100644
--- a/pkg/services/object/util/key_test.go
+++ b/pkg/services/object/util/key_test.go
@@ -5,10 +5,10 @@ import (
"crypto/elliptic"
"testing"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
tokenStorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -25,7 +25,7 @@ func TestNewKeyStorage(t *testing.T) {
tokenStor := tokenStorage.NewTokenStore()
stor := util.NewKeyStorage(&nodeKey.PrivateKey, tokenStor, mockedNetworkState{42})
- owner := usertest.ID()
+ owner := *usertest.ID()
t.Run("node key", func(t *testing.T) {
key, err := stor.GetKey(nil)
@@ -36,7 +36,7 @@ func TestNewKeyStorage(t *testing.T) {
t.Run("unknown token", func(t *testing.T) {
_, err = stor.GetKey(&util.SessionInfo{
ID: uuid.New(),
- Owner: usertest.ID(),
+ Owner: *usertest.ID(),
})
require.Error(t, err)
})
diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go
index b10826226..92beedaa7 100644
--- a/pkg/services/object/util/log.go
+++ b/pkg/services/object/util/log.go
@@ -1,8 +1,6 @@
package util
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -10,10 +8,18 @@ import (
)
// LogServiceError writes error message of object service to provided logger.
-func LogServiceError(ctx context.Context, l *logger.Logger, req string, node network.AddressGroup, err error) {
- l.Error(ctx, logs.UtilObjectServiceError,
+func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) {
+ l.Error(logs.UtilObjectServiceError,
zap.String("node", network.StringifyGroup(node)),
zap.String("request", req),
- zap.Error(err),
+ zap.String("error", err.Error()),
+ )
+}
+
+// LogWorkerPoolError writes debug error message of object worker pool to provided logger.
+func LogWorkerPoolError(l *logger.Logger, req string, err error) {
+ l.Error(logs.UtilCouldNotPushTaskToWorkerPool,
+ zap.String("request", req),
+ zap.String("error", err.Error()),
)
}
diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go
index f74b0aab9..6cd3856f4 100644
--- a/pkg/services/object/util/placement.go
+++ b/pkg/services/object/util/placement.go
@@ -1,9 +1,7 @@
package util
import (
- "context"
"fmt"
- "slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -45,8 +43,8 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu
}
}
-func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
+func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -78,8 +76,8 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac
}
}
-func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy)
+func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ vs, err := p.builder.BuildPlacement(cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)
}
@@ -94,7 +92,7 @@ func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *o
}
if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) {
- vs[i] = slices.Delete(vs[i], j, j+1)
+ vs[i] = append(vs[i][:j], vs[i][j+1:]...)
j--
}
}
@@ -124,17 +122,17 @@ func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *Trav
// GenerateTraverser generates placement Traverser for provided object address
// using epoch-th network map.
-func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) {
+func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, error) {
// get network map by epoch
- nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch)
+ nm, err := g.netMapSrc.GetNetMapByEpoch(epoch)
if err != nil {
- return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err)
+ return nil, fmt.Errorf("could not get network map #%d: %w", epoch, err)
}
// get container related container
- cnr, err := g.cnrSrc.Get(ctx, idCnr)
+ cnr, err := g.cnrSrc.Get(idCnr)
if err != nil {
- return nil, nil, fmt.Errorf("could not get container: %w", err)
+ return nil, fmt.Errorf("could not get container: %w", err)
}
// allocate placement traverser options
@@ -162,9 +160,5 @@ func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID
)
}
- t, err := placement.NewTraverser(ctx, traverseOpts...)
- if err != nil {
- return nil, nil, err
- }
- return t, cnr, nil
+ return placement.NewTraverser(traverseOpts...)
}
diff --git a/pkg/services/object/util/prm.go b/pkg/services/object/util/prm.go
index 34d8ec704..c09c07cc1 100644
--- a/pkg/services/object/util/prm.go
+++ b/pkg/services/object/util/prm.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strconv"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
sessionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)
@@ -100,24 +100,16 @@ func (p *CommonPrm) SetNetmapLookupDepth(v uint64) {
// ForgetTokens forgets all the tokens read from the request's
// meta information before.
-func (p *CommonPrm) ForgetTokens() func() {
+func (p *CommonPrm) ForgetTokens() {
if p != nil {
- tk := p.token
- br := p.bearer
p.token = nil
p.bearer = nil
- return func() {
- p.token = tk
- p.bearer = br
- }
}
- return func() {}
}
func CommonPrmFromV2(req interface {
GetMetaHeader() *session.RequestMetaHeader
-},
-) (*CommonPrm, error) {
+}) (*CommonPrm, error) {
meta := req.GetMetaHeader()
ttl := meta.GetTTL()
diff --git a/pkg/services/object_manager/placement/cache.go b/pkg/services/object_manager/placement/cache.go
deleted file mode 100644
index 2a8460ca5..000000000
--- a/pkg/services/object_manager/placement/cache.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package placement
-
-import (
- "crypto/sha256"
- "fmt"
- "slices"
- "sync"
-
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/hashicorp/golang-lru/v2/simplelru"
-)
-
-// ContainerNodesCache caches results of ContainerNodes() invocation between epochs.
-type ContainerNodesCache struct {
- // mtx protects lastEpoch and containerCache fields.
- mtx sync.Mutex
- // lastEpoch contains network map epoch for all values in the container cache.
- lastEpoch uint64
- // containerCache caches container nodes by ID. It is used to skip `GetContainerNodes` invocation if
- // neither netmap nor container has changed.
- containerCache simplelru.LRUCache[cid.ID, [][]netmapSDK.NodeInfo]
-}
-
-// defaultContainerCacheSize is the default size for the container cache.
-const defaultContainerCacheSize = 10
-
-// NewContainerNodesCache creates new cache which saves the result of the ContainerNodes() invocations.
-// If size is <= 0, defaultContainerCacheSize (10) is used.
-func NewContainerNodesCache(size int) *ContainerNodesCache {
- if size <= 0 {
- size = defaultContainerCacheSize
- }
-
- cache, _ := simplelru.NewLRU[cid.ID, [][]netmapSDK.NodeInfo](size, nil) // no error
- return &ContainerNodesCache{
- containerCache: cache,
- }
-}
-
-// ContainerNodes returns the result of nm.ContainerNodes(), possibly from the cache.
-func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- c.mtx.Lock()
- if nm.Epoch() == c.lastEpoch {
- raw, ok := c.containerCache.Get(cnr)
- c.mtx.Unlock()
- if ok {
- return c.cloneResult(raw), nil
- }
- } else {
- c.lastEpoch = nm.Epoch()
- c.containerCache.Purge()
- c.mtx.Unlock()
- }
-
- binCnr := make([]byte, sha256.Size)
- cnr.Encode(binCnr)
-
- cn, err := nm.ContainerNodes(p, binCnr)
- if err != nil {
- return nil, fmt.Errorf("could not get container nodes: %w", err)
- }
-
- c.mtx.Lock()
- if c.lastEpoch == nm.Epoch() {
- c.containerCache.Add(cnr, cn)
- }
- c.mtx.Unlock()
- return c.cloneResult(cn), nil
-}
-
-func (c *ContainerNodesCache) cloneResult(nodes [][]netmapSDK.NodeInfo) [][]netmapSDK.NodeInfo {
- result := make([][]netmapSDK.NodeInfo, len(nodes))
- for repIdx := range nodes {
- result[repIdx] = slices.Clone(nodes[repIdx])
- }
- return result
-}
diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go
deleted file mode 100644
index 7242970b5..000000000
--- a/pkg/services/object_manager/placement/cache_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package placement_test
-
-import (
- "strconv"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/stretchr/testify/require"
-)
-
-func TestContainerNodesCache(t *testing.T) {
- const size = 3
-
- nodes := [6]netmapSDK.NodeInfo{}
- for i := range nodes {
- nodes[i].SetAttribute("ATTR", strconv.Itoa(i))
- }
-
- nm := func(epoch uint64, nodes []netmapSDK.NodeInfo) *netmapSDK.NetMap {
- var nm netmapSDK.NetMap
- nm.SetEpoch(epoch)
- nm.SetNodes(nodes)
- return &nm
- }
-
- var pp netmapSDK.PlacementPolicy
- require.NoError(t, pp.DecodeString("REP 1"))
-
- t.Run("update netmap on the new epoch", func(t *testing.T) {
- c := placement.NewContainerNodesCache(size)
-
- cnr := cidtest.ID()
- res, err := c.ContainerNodes(nm(1, nodes[0:1]), cnr, pp)
- require.NoError(t, err)
-
- // Use other nodes in the argument to ensure the result is taken from cache.
- resCached, err := c.ContainerNodes(nm(1, nodes[1:2]), cnr, pp)
- require.NoError(t, err)
- require.Equal(t, res, resCached)
-
- // Update epoch, netmap should be purged.
- resCached, err = c.ContainerNodes(nm(2, nodes[2:3]), cnr, pp)
- require.NoError(t, err)
- require.NotEqual(t, res, resCached)
- })
- t.Run("cache uses container as a key", func(t *testing.T) {
- c := placement.NewContainerNodesCache(size)
-
- res1, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp)
- require.NoError(t, err)
-
- res2, err := c.ContainerNodes(nm(1, nodes[1:2]), cidtest.ID(), pp)
- require.NoError(t, err)
-
- require.NotEqual(t, res1, res2)
- })
- t.Run("cache respects size parameter", func(t *testing.T) {
- c := placement.NewContainerNodesCache(size)
-
- nm1 := nm(1, nodes[0:1])
- nm2 := nm(1, nodes[1:2])
- cnr := [size * 2]cid.ID{}
- res := [size * 2][][]netmapSDK.NodeInfo{}
- for i := range size * 2 {
- cnr[i] = cidtest.ID()
-
- var err error
- res[i], err = c.ContainerNodes(nm1, cnr[i], pp)
- require.NoError(t, err)
- }
-
- for i := size; i < size*2; i++ {
- r, err := c.ContainerNodes(nm2, cnr[i], pp)
- require.NoError(t, err)
- require.Equal(t, res[i], r)
- }
- for i := range size {
- r, err := c.ContainerNodes(nm2, cnr[i], pp)
- require.NoError(t, err)
- require.NotEqual(t, res[i], r)
- }
- })
- t.Run("the error is propagated", func(t *testing.T) {
- var pp netmapSDK.PlacementPolicy
- r := netmapSDK.ReplicaDescriptor{}
- r.SetNumberOfObjects(1)
- r.SetSelectorName("Missing")
- pp.AddReplicas(r)
-
- c := placement.NewContainerNodesCache(size)
- _, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp)
- require.Error(t, err)
- })
-}
diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go
deleted file mode 100644
index 0f24a9d96..000000000
--- a/pkg/services/object_manager/placement/metrics.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package placement
-
-import (
- "errors"
- "fmt"
- "maps"
- "math"
- "strings"
- "sync"
- "sync/atomic"
-
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
- locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-const (
- attrPrefix = "$attribute:"
-
- geoDistance = "$geoDistance"
-)
-
-type Metric interface {
- CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int
-}
-
-type metricsParser struct {
- locodeDBPath string
- locodes map[string]locodedb.Point
-}
-
-type MetricParser interface {
- ParseMetrics([]string) ([]Metric, error)
-}
-
-func NewMetricsParser(locodeDBPath string) (MetricParser, error) {
- return &metricsParser{
- locodeDBPath: locodeDBPath,
- }, nil
-}
-
-func (p *metricsParser) initLocodes() error {
- if len(p.locodes) != 0 {
- return nil
- }
- if len(p.locodeDBPath) > 0 {
- p.locodes = make(map[string]locodedb.Point)
- locodeDB := locodebolt.New(locodebolt.Prm{
- Path: p.locodeDBPath,
- },
- locodebolt.ReadOnly(),
- )
- err := locodeDB.Open()
- if err != nil {
- return err
- }
- defer locodeDB.Close()
- err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) {
- p.locodes[k] = v
- })
- if err != nil {
- return err
- }
- return nil
- }
- return errors.New("set path to locode database")
-}
-
-func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) {
- var metrics []Metric
- for _, raw := range priority {
- if attr, found := strings.CutPrefix(raw, attrPrefix); found {
- metrics = append(metrics, NewAttributeMetric(attr))
- } else if raw == geoDistance {
- err := p.initLocodes()
- if err != nil {
- return nil, err
- }
- if len(p.locodes) == 0 {
- return nil, fmt.Errorf("provide locodes database for metric %s", raw)
- }
- m := NewGeoDistanceMetric(p.locodes)
- metrics = append(metrics, m)
- } else {
- return nil, fmt.Errorf("unsupported priority metric %s", raw)
- }
- }
- return metrics, nil
-}
-
-// attributeMetric describes priority metric based on attribute.
-type attributeMetric struct {
- attribute string
-}
-
-// CalculateValue return [0] if from and to contains attribute attributeMetric.attribute and
-// the value of attribute is the same. In other case return [1].
-func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
- fromAttr := from.Attribute(am.attribute)
- toAttr := to.Attribute(am.attribute)
- if len(fromAttr) > 0 && len(toAttr) > 0 && fromAttr == toAttr {
- return 0
- }
- return 1
-}
-
-func NewAttributeMetric(attr string) Metric {
- return &attributeMetric{attribute: attr}
-}
-
-// geoDistanceMetric describes priority metric based on attribute.
-type geoDistanceMetric struct {
- locodes map[string]locodedb.Point
- distance *atomic.Pointer[map[string]int]
- mtx sync.Mutex
-}
-
-func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric {
- d := atomic.Pointer[map[string]int]{}
- m := make(map[string]int)
- d.Store(&m)
- gm := &geoDistanceMetric{
- locodes: locodes,
- distance: &d,
- }
- return gm
-}
-
-// CalculateValue return distance in kilometers between current node and provided,
-// if coordinates for provided node found. In other case return math.MaxInt.
-func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
- fl := from.LOCODE()
- tl := to.LOCODE()
- if fl == tl {
- return 0
- }
- m := gm.distance.Load()
- if v, ok := (*m)[fl+tl]; ok {
- return v
- }
- return gm.calculateDistance(fl, tl)
-}
-
-func (gm *geoDistanceMetric) calculateDistance(from, to string) int {
- gm.mtx.Lock()
- defer gm.mtx.Unlock()
- od := gm.distance.Load()
- if v, ok := (*od)[from+to]; ok {
- return v
- }
- nd := maps.Clone(*od)
- var dist int
- pointFrom, okFrom := gm.locodes[from]
- pointTo, okTo := gm.locodes[to]
- if okFrom && okTo {
- dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude()))
- } else {
- dist = math.MaxInt
- }
- nd[from+to] = dist
- gm.distance.Store(&nd)
-
- return dist
-}
-
-// distance return amount of KM between two points.
-// Parameters are latitude and longitude of point 1 and 2 in decimal degrees.
-// Original implementation can be found here https://www.geodatasource.com/developers/go.
-func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 {
- radLat1 := math.Pi * lt1 / 180
- radLat2 := math.Pi * lt2 / 180
- radTheta := math.Pi * (ln1 - ln2) / 180
-
- dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta)
-
- if dist > 1 {
- dist = 1
- }
-
- dist = math.Acos(dist)
- dist = dist * 180 / math.Pi
- dist = dist * 60 * 1.1515 * 1.609344
-
- return dist
-}
diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go
index b3f8d9c03..787c04421 100644
--- a/pkg/services/object_manager/placement/netmap.go
+++ b/pkg/services/object_manager/placement/netmap.go
@@ -1,19 +1,25 @@
package placement
import (
- "context"
"crypto/sha256"
"fmt"
+ "sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/hashicorp/golang-lru/v2/simplelru"
)
type netMapBuilder struct {
- nmSrc netmap.Source
- containerCache *ContainerNodesCache
+ nmSrc netmap.Source
+ // mtx protects lastNm and containerCache fields.
+ mtx sync.Mutex
+ lastNm *netmapSDK.NetMap
+ // containerCache caches container nodes by ID. It is used to skip `GetContainerNodes` invocation if
+ // neither netmap nor container has changed.
+ containerCache simplelru.LRUCache[string, [][]netmapSDK.NodeInfo]
}
type netMapSrc struct {
@@ -22,35 +28,59 @@ type netMapSrc struct {
nm *netmapSDK.NetMap
}
+// defaultContainerCacheSize is the default size for the container cache.
+const defaultContainerCacheSize = 10
+
func NewNetworkMapBuilder(nm *netmapSDK.NetMap) Builder {
+ cache, _ := simplelru.NewLRU[string, [][]netmapSDK.NodeInfo](defaultContainerCacheSize, nil) // no error
return &netMapBuilder{
nmSrc: &netMapSrc{nm: nm},
- containerCache: NewContainerNodesCache(0),
+ containerCache: cache,
}
}
func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder {
+ cache, _ := simplelru.NewLRU[string, [][]netmapSDK.NodeInfo](defaultContainerCacheSize, nil) // no error
return &netMapBuilder{
nmSrc: nmSrc,
- containerCache: NewContainerNodesCache(0),
+ containerCache: cache,
}
}
-func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) {
+func (s *netMapSrc) GetNetMap(_ uint64) (*netmapSDK.NetMap, error) {
return s.nm, nil
}
-func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc)
+func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+ nm, err := netmap.GetLatestNetworkMap(b.nmSrc)
if err != nil {
return nil, fmt.Errorf("could not get network map: %w", err)
}
- cn, err := b.containerCache.ContainerNodes(nm, cnr, p)
- if err != nil {
- return nil, err
+ binCnr := make([]byte, sha256.Size)
+ cnr.Encode(binCnr)
+
+ b.mtx.Lock()
+ if nm == b.lastNm {
+ raw, ok := b.containerCache.Get(string(binCnr))
+ b.mtx.Unlock()
+ if ok {
+ return BuildObjectPlacement(nm, raw, obj)
+ }
+ } else {
+ b.containerCache.Purge()
+ b.mtx.Unlock()
}
+ cn, err := nm.ContainerNodes(p, binCnr)
+ if err != nil {
+ return nil, fmt.Errorf("could not get container nodes: %w", err)
+ }
+
+ b.mtx.Lock()
+ b.containerCache.Add(string(binCnr), cn)
+ b.mtx.Unlock()
+
return BuildObjectPlacement(nm, cn, obj)
}
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index a3f9af959..dc9ab5e7a 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -1,10 +1,8 @@
package placement
import (
- "context"
"errors"
"fmt"
- "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -22,12 +20,7 @@ type Builder interface {
//
// Must return all container nodes if object identifier
// is nil.
- BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
-}
-
-type NodeState interface {
- // LocalNodeInfo return current node state in FrostFS API v2 NodeInfo structure.
- LocalNodeInfo() *netmap.NodeInfo
+ BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
}
// Option represents placement traverser option.
@@ -57,10 +50,6 @@ type cfg struct {
policy netmap.PlacementPolicy
builder Builder
-
- metrics []Metric
-
- nodeState NodeState
}
const invalidOptsMsg = "invalid traverser options"
@@ -79,7 +68,7 @@ func defaultCfg() *cfg {
}
// NewTraverser creates, initializes with options and returns Traverser instance.
-func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
+func NewTraverser(opts ...Option) (*Traverser, error) {
cfg := defaultCfg()
for i := range opts {
@@ -99,7 +88,7 @@ func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy)
}
- ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy)
+ ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy)
if err != nil {
return nil, fmt.Errorf("could not build placement: %w", err)
}
@@ -110,20 +99,7 @@ func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) {
}
var rem []int
- if len(cfg.metrics) > 0 && cfg.nodeState != nil {
- rem = defaultCopiesVector(cfg.policy)
- var unsortedVector []netmap.NodeInfo
- var regularVector []netmap.NodeInfo
- for i := range rem {
- pivot := min(len(ns[i]), rem[i])
- unsortedVector = append(unsortedVector, ns[i][:pivot]...)
- regularVector = append(regularVector, ns[i][pivot:]...)
- }
- rem = []int{-1, -1}
-
- sortedVector := sortVector(cfg, unsortedVector)
- ns = [][]netmap.NodeInfo{sortedVector, regularVector}
- } else if cfg.flatSuccess != nil {
+ if cfg.flatSuccess != nil {
ns = flatNodes(ns)
rem = []int{int(*cfg.flatSuccess)}
} else {
@@ -160,8 +136,8 @@ func defaultCopiesVector(policy netmap.PlacementPolicy) []int {
replNum := policy.NumberOfReplicas()
copyVector := make([]int, 0, replNum)
- for i := range replNum {
- copyVector = append(copyVector, int(policy.ReplicaDescriptor(i).NumberOfObjects()+policy.ReplicaDescriptor(i).GetECDataCount()+policy.ReplicaDescriptor(i).GetECParityCount()))
+ for i := 0; i < replNum; i++ {
+ copyVector = append(copyVector, int(policy.ReplicaNumberByIndex(i)))
}
return copyVector
@@ -181,35 +157,6 @@ func flatNodes(ns [][]netmap.NodeInfo) [][]netmap.NodeInfo {
return [][]netmap.NodeInfo{flat}
}
-type nodeMetrics struct {
- index int
- metrics []int
-}
-
-func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo {
- nm := make([]nodeMetrics, len(unsortedVector))
- node := cfg.nodeState.LocalNodeInfo()
-
- for i := range unsortedVector {
- m := make([]int, len(cfg.metrics))
- for j, pm := range cfg.metrics {
- m[j] = pm.CalculateValue(node, &unsortedVector[i])
- }
- nm[i] = nodeMetrics{
- index: i,
- metrics: m,
- }
- }
- slices.SortStableFunc(nm, func(a, b nodeMetrics) int {
- return slices.Compare(a.metrics, b.metrics)
- })
- sortedVector := make([]netmap.NodeInfo, len(unsortedVector))
- for i := range unsortedVector {
- sortedVector[i] = unsortedVector[nm[i].index]
- }
- return sortedVector
-}
-
// Node is a descriptor of storage node with information required for intra-container communication.
type Node struct {
addresses network.AddressGroup
@@ -234,15 +181,6 @@ func (x Node) PublicKey() []byte {
return x.key
}
-// NewNode creates new Node.
-func NewNode(addresses network.AddressGroup, externalAddresses network.AddressGroup, key []byte) Node {
- return Node{
- addresses: addresses,
- externalAddresses: externalAddresses,
- key: key,
- }
-}
-
// Next returns next unprocessed address of the object placement.
//
// Returns nil if no nodes left or traversal operation succeeded.
@@ -265,7 +203,7 @@ func (t *Traverser) Next() []Node {
nodes := make([]Node, count)
- for i := range count {
+ for i := 0; i < count; i++ {
err := nodes[i].addresses.FromIterator(network.NodeEndpointsIterator(t.vectors[0][i]))
if err != nil {
return nil
@@ -288,8 +226,8 @@ func (t *Traverser) Next() []Node {
func (t *Traverser) skipEmptyVectors() {
for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body
if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 {
- t.vectors = slices.Delete(t.vectors, i, i+1)
- t.rem = slices.Delete(t.rem, i, i+1)
+ t.vectors = append(t.vectors[:i], t.vectors[i+1:]...)
+ t.rem = append(t.rem[:i], t.rem[i+1:]...)
i--
} else {
break
@@ -356,13 +294,6 @@ func SuccessAfter(v uint32) Option {
}
}
-// ResetSuccessAfter resets flat success number setting option.
-func ResetSuccessAfter() Option {
- return func(c *cfg) {
- c.flatSuccess = nil
- }
-}
-
// WithoutSuccessTracking disables success tracking in traversal.
func WithoutSuccessTracking() Option {
return func(c *cfg) {
@@ -375,17 +306,3 @@ func WithCopyNumbers(v []uint32) Option {
c.copyNumbers = v
}
}
-
-// WithPriorityMetrics use provided priority metrics to sort nodes.
-func WithPriorityMetrics(m []Metric) Option {
- return func(c *cfg) {
- c.metrics = m
- }
-}
-
-// WithNodeState provide state of the current node.
-func WithNodeState(s NodeState) Option {
- return func(c *cfg) {
- c.nodeState = s
- }
-}
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index d1370f21e..9b70efc73 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -1,8 +1,6 @@
package placement
import (
- "context"
- "slices"
"strconv"
"testing"
@@ -19,14 +17,12 @@ type testBuilder struct {
vectors [][]netmap.NodeInfo
}
-func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return b.vectors, nil
}
func testNode(v uint32) (n netmap.NodeInfo) {
- ip := "/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v))
- n.SetNetworkEndpoints(ip)
- n.SetPublicKey([]byte(ip))
+ n.SetNetworkEndpoints("/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v)))
return n
}
@@ -35,7 +31,8 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
vc := make([][]netmap.NodeInfo, 0, len(v))
for i := range v {
- ns := slices.Clone(v[i])
+ ns := make([]netmap.NodeInfo, len(v[i]))
+ copy(ns, v[i])
vc = append(vc, ns)
}
@@ -43,15 +40,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
return vc
}
-func testPlacement(ss []int, rs []int) ([][]netmap.NodeInfo, container.Container) {
- return placement(ss, rs, nil)
-}
-
-func testECPlacement(ss []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) {
- return placement(ss, nil, ec)
-}
-
-func placement(ss []int, rs []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) {
+func testPlacement(t *testing.T, ss, rs []int) ([][]netmap.NodeInfo, container.Container) {
nodes := make([][]netmap.NodeInfo, 0, len(rs))
replicas := make([]netmap.ReplicaDescriptor, 0, len(rs))
num := uint32(0)
@@ -59,7 +48,7 @@ func placement(ss []int, rs []int, ec [][]int) ([][]netmap.NodeInfo, container.C
for i := range ss {
ns := make([]netmap.NodeInfo, 0, ss[i])
- for range ss[i] {
+ for j := 0; j < ss[i]; j++ {
ns = append(ns, testNode(num))
num++
}
@@ -67,12 +56,7 @@ func placement(ss []int, rs []int, ec [][]int) ([][]netmap.NodeInfo, container.C
nodes = append(nodes, ns)
var rd netmap.ReplicaDescriptor
- if len(rs) > 0 {
- rd.SetNumberOfObjects(uint32(rs[i]))
- } else {
- rd.SetECDataCount(uint32(ec[i][0]))
- rd.SetECParityCount(uint32(ec[i][1]))
- }
+ rd.SetNumberOfObjects(uint32(rs[i]))
replicas = append(replicas, rd)
}
@@ -99,11 +83,11 @@ func TestTraverserObjectScenarios(t *testing.T) {
selectors := []int{2, 3}
replicas := []int{1, 2}
- nodes, cnr := testPlacement(selectors, replicas)
+ nodes, cnr := testPlacement(t, selectors, replicas)
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithoutSuccessTracking(),
@@ -128,11 +112,11 @@ func TestTraverserObjectScenarios(t *testing.T) {
selectors := []int{5, 3}
replicas := []int{2, 2}
- nodes, cnr := testPlacement(selectors, replicas)
+ nodes, cnr := testPlacement(t, selectors, replicas)
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: nodesCopy,
@@ -141,7 +125,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
)
require.NoError(t, err)
- for range len(nodes[0]) {
+ for i := 0; i < len(nodes[0]); i++ {
require.NotNil(t, tr.Next())
}
@@ -150,18 +134,18 @@ func TestTraverserObjectScenarios(t *testing.T) {
err = n.FromIterator(netmapcore.Node(nodes[1][0]))
require.NoError(t, err)
- require.Equal(t, []Node{{addresses: n, key: []byte("/ip4/0.0.0.0/tcp/5")}}, tr.Next())
+ require.Equal(t, []Node{{addresses: n}}, tr.Next())
})
t.Run("put scenario", func(t *testing.T) {
selectors := []int{5, 3}
replicas := []int{2, 2}
- nodes, cnr := testPlacement(selectors, replicas)
+ nodes, cnr := testPlacement(t, selectors, replicas)
nodesCopy := copyVectors(nodes)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
)
@@ -180,7 +164,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
require.Empty(t, tr.Next())
require.False(t, tr.Success())
- for range replicas[curVector] {
+ for i := 0; i < replicas[curVector]; i++ {
tr.SubmitSuccess()
}
}
@@ -200,9 +184,9 @@ func TestTraverserObjectScenarios(t *testing.T) {
selectors := []int{2, 3}
replicas := []int{1, 2}
- nodes, cnr := testPlacement(selectors, replicas)
+ nodes, cnr := testPlacement(t, selectors, replicas)
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local)
@@ -229,7 +213,7 @@ func TestTraverserRemValues(t *testing.T) {
selectors := []int{3, 4, 5}
replicas := []int{2, 3, 4}
- nodes, cnr := testPlacement(selectors, replicas)
+ nodes, cnr := testPlacement(t, selectors, replicas)
nodesCopy := copyVectors(nodes)
testCases := [...]struct {
@@ -277,7 +261,7 @@ func TestTraverserRemValues(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
- tr, err := NewTraverser(context.Background(),
+ tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{vectors: nodesCopy}),
WithCopyNumbers(testCase.copyNumbers),
@@ -291,363 +275,3 @@ func TestTraverserRemValues(t *testing.T) {
})
}
}
-
-type nodeState struct {
- node *netmap.NodeInfo
-}
-
-func (n *nodeState) LocalNodeInfo() *netmap.NodeInfo {
- return n.node
-}
-
-func TestTraverserPriorityMetrics(t *testing.T) {
- t.Run("one rep one metric", func(t *testing.T) {
- selectors := []int{4}
- replicas := []int{3}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("ClusterName", "A")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("ClusterName", "A")
- // Node_2, PK - ip4/0.0.0.0/tcp/2
- nodes[0][2].SetAttribute("ClusterName", "B")
- // Node_3, PK - ip4/0.0.0.0/tcp/3
- nodes[0][3].SetAttribute("ClusterName", "B")
-
- sdkNode := testNode(5)
- sdkNode.SetAttribute("ClusterName", "B")
-
- nodesCopy := copyVectors(nodes)
-
- m := []Metric{NewAttributeMetric("ClusterName")}
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `ClusterName` the order will be:
- // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}]
- // With priority metric `ClusterName` and current node in cluster B
- // the order should be:
- // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 3, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey()))
-
- next = tr.Next()
- // The last node is
- require.Equal(t, 1, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
- t.Run("one rep one metric fewer nodes", func(t *testing.T) {
- selectors := []int{2}
- replicas := []int{3}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("ClusterName", "A")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("ClusterName", "B")
-
- sdkNode := testNode(5)
- sdkNode.SetAttribute("ClusterName", "B")
-
- nodesCopy := copyVectors(nodes)
-
- m := []Metric{NewAttributeMetric("ClusterName")}
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `ClusterName` the order will be:
- // [ {Node_0 A}, {Node_1 A} ]
- // With priority metric `ClusterName` and current node in cluster B
- // the order should be:
- // [ {Node_1 B}, {Node_0 A} ]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
- t.Run("two reps two metrics", func(t *testing.T) {
- selectors := []int{3, 3}
- replicas := []int{2, 2}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // REPLICA #1
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("ClusterName", "A")
- nodes[0][0].SetAttribute("UN-LOCODE", "RU LED")
-
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("ClusterName", "A")
- nodes[0][1].SetAttribute("UN-LOCODE", "FI HEL")
-
- // Node_2, PK - ip4/0.0.0.0/tcp/2
- nodes[0][2].SetAttribute("ClusterName", "A")
- nodes[0][2].SetAttribute("UN-LOCODE", "RU LED")
-
- // REPLICA #2
- // Node_3 ip4/0.0.0.0/tcp/3
- nodes[1][0].SetAttribute("ClusterName", "B")
- nodes[1][0].SetAttribute("UN-LOCODE", "RU MOW")
-
- // Node_4, PK - ip4/0.0.0.0/tcp/4
- nodes[1][1].SetAttribute("ClusterName", "B")
- nodes[1][1].SetAttribute("UN-LOCODE", "RU DME")
-
- // Node_5, PK - ip4/0.0.0.0/tcp/5
- nodes[1][2].SetAttribute("ClusterName", "B")
- nodes[1][2].SetAttribute("UN-LOCODE", "RU MOW")
-
- sdkNode := testNode(9)
- sdkNode.SetAttribute("ClusterName", "B")
- sdkNode.SetAttribute("UN-LOCODE", "RU DME")
-
- nodesCopy := copyVectors(nodes)
-
- m := []Metric{
- NewAttributeMetric("ClusterName"),
- NewAttributeMetric("UN-LOCODE"),
- }
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Check that nodes in the same cluster and
- // in the same location should be the first in slice.
- // Nodes which are follow criteria but stay outside the replica
- // should be in the next slice.
-
- next := tr.Next()
- require.Equal(t, 4, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey()))
-
- next = tr.Next()
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
-
- sdkNode.SetAttribute("ClusterName", "B")
- sdkNode.SetAttribute("UN-LOCODE", "RU MOW")
-
- nodesCopy = copyVectors(nodes)
-
- tr, err = NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- next = tr.Next()
- require.Equal(t, 4, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey()))
-
- next = tr.Next()
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
-
- sdkNode.SetAttribute("ClusterName", "A")
- sdkNode.SetAttribute("UN-LOCODE", "RU LED")
-
- nodesCopy = copyVectors(nodes)
-
- tr, err = NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- next = tr.Next()
- require.Equal(t, 4, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[2].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[3].PublicKey()))
-
- next = tr.Next()
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
- t.Run("ec container", func(t *testing.T) {
- selectors := []int{4}
- ec := [][]int{{2, 1}}
-
- nodes, cnr := testECPlacement(selectors, ec)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("ClusterName", "A")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("ClusterName", "A")
- // Node_2, PK - ip4/0.0.0.0/tcp/2
- nodes[0][2].SetAttribute("ClusterName", "B")
- // Node_3, PK - ip4/0.0.0.0/tcp/3
- nodes[0][3].SetAttribute("ClusterName", "B")
-
- sdkNode := testNode(5)
- sdkNode.SetAttribute("ClusterName", "B")
-
- nodesCopy := copyVectors(nodes)
-
- m := []Metric{NewAttributeMetric("ClusterName")}
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `ClusterName` the order will be:
- // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}]
- // With priority metric `ClusterName` and current node in cluster B
- // the order should be:
- // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 3, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey()))
-
- next = tr.Next()
- // The last node is
- require.Equal(t, 1, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-
- t.Run("one rep one geo metric", func(t *testing.T) {
- t.Skip()
- selectors := []int{2}
- replicas := []int{2}
-
- nodes, cnr := testPlacement(selectors, replicas)
-
- // Node_0, PK - ip4/0.0.0.0/tcp/0
- nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW")
- // Node_1, PK - ip4/0.0.0.0/tcp/1
- nodes[0][1].SetAttribute("UN-LOCODE", "RU LED")
-
- sdkNode := testNode(2)
- sdkNode.SetAttribute("UN-LOCODE", "FI HEL")
-
- nodesCopy := copyVectors(nodes)
-
- parser, err := NewMetricsParser("/path/to/locode_db")
- require.NoError(t, err)
- m, err := parser.ParseMetrics([]string{geoDistance})
- require.NoError(t, err)
-
- tr, err := NewTraverser(context.Background(),
- ForContainer(cnr),
- UseBuilder(&testBuilder{
- vectors: nodesCopy,
- }),
- WithoutSuccessTracking(),
- WithPriorityMetrics(m),
- WithNodeState(&nodeState{
- node: &sdkNode,
- }),
- )
- require.NoError(t, err)
-
- // Without priority metric `$geoDistance` the order will be:
- // [ {Node_0 RU MOW}, {Node_1 RU LED}]
- // With priority metric `$geoDistance` the order should be:
- // [ {Node_1 RU LED}, {Node_0 RU MOW}]
- next := tr.Next()
- require.NotNil(t, next)
- require.Equal(t, 2, len(next))
- require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
- require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
-
- next = tr.Next()
- require.Nil(t, next)
- })
-}
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index e5f001d5a..c3c810001 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -4,9 +4,9 @@ import (
"context"
"strconv"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru/v2"
@@ -57,12 +57,14 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
ts, err := g.tsSource.Tombstone(ctx, a, epoch)
if err != nil {
- log.Warn(ctx,
+ log.Warn(
logs.TombstoneCouldNotGetTheTombstoneTheSource,
zap.Error(err),
)
- } else if ts != nil {
- return g.handleTS(ctx, addrStr, ts, epoch)
+ } else {
+ if ts != nil {
+ return g.handleTS(addrStr, ts, epoch)
+ }
}
// requested tombstone not
@@ -70,12 +72,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
return false
}
-func (g *ExpirationChecker) handleTS(ctx context.Context, addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
+func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
for _, atr := range ts.Attributes() {
- if atr.Key() == objectV2.SysAttributeExpEpoch {
+ if atr.Key() == objectV2.SysAttributeExpEpoch || atr.Key() == objectV2.SysAttributeExpEpochNeoFS {
epoch, err := strconv.ParseUint(atr.Value(), 10, 64)
if err != nil {
- g.log.Warn(ctx,
+ g.log.Warn(
logs.TombstoneExpirationParseFailure,
zap.Error(err),
)
diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go
index 2147a32fe..9d33e8179 100644
--- a/pkg/services/object_manager/tombstone/constructor.go
+++ b/pkg/services/object_manager/tombstone/constructor.go
@@ -3,7 +3,6 @@ package tombstone
import (
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru/v2"
"go.uber.org/zap"
@@ -24,7 +23,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- log: logger.NewLoggerWrapper(zap.NewNop()),
+ log: &logger.Logger{Logger: zap.NewNop()},
cacheSize: defaultLRUCacheSize,
}
}
@@ -50,7 +49,9 @@ func NewChecker(oo ...Option) *ExpirationChecker {
panicOnNil(cfg.tsSource, "Tombstone source")
cache, err := lru.New[string, uint64](cfg.cacheSize)
- assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize))
+ if err != nil {
+ panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err))
+ }
return &ExpirationChecker{
cache: cache,
diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go
index 975941847..1ff07b05a 100644
--- a/pkg/services/object_manager/tombstone/source/source.go
+++ b/pkg/services/object_manager/tombstone/source/source.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -39,7 +38,9 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) {
// Panics if any of the provided options does not allow
// constructing a valid tombstone local Source.
func NewSource(p TombstoneSourcePrm) Source {
- assert.False(p.s == nil, "Tombstone source: nil object service")
+ if p.s == nil {
+ panic("Tombstone source: nil object service")
+ }
return Source(p)
}
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index dcaaec0b4..c04273468 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -2,40 +2,34 @@ package policer
import (
"context"
+ "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- policycore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
-func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "Policer.ProcessObject", trace.WithAttributes(
- attribute.String("address", objInfo.Address.String()),
- attribute.Bool("is_linking_object", objInfo.IsLinkingObject),
- attribute.Bool("is_ec_part", objInfo.ECInfo != nil),
- attribute.String("type", objInfo.Type.String()),
- ))
- defer span.End()
+func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.AddressWithType) error {
+ addr := addrWithType.Address
+ idCnr := addr.Container()
+ idObj := addr.Object()
- cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container())
+ cnr, err := p.cnrSrc.Get(idCnr)
if err != nil {
if client.IsErrContainerNotFound(err) {
- existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container())
+ existed, errWasRemoved := containercore.WasRemoved(p.cnrSrc, idCnr)
if errWasRemoved != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved)
} else if existed {
- err := p.buryFn(ctx, objInfo.Address)
+ err := p.buryFn(ctx, addrWithType.Address)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotInhumeObjectWithMissingContainer, err)
}
@@ -47,22 +41,18 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
policy := cnr.Value.PlacementPolicy()
- if policycore.IsECPlacement(policy) {
- return p.processECContainerObject(ctx, objInfo, cnr.Value)
- }
- return p.processRepContainerObject(ctx, objInfo, policy)
-}
-
-func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
- idObj := objInfo.Address.Object()
- idCnr := objInfo.Address.Container()
- nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy)
+ nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy)
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
c := &placementRequirements{}
+ var numOfContainerNodes int
+ for i := range nn {
+ numOfContainerNodes += len(nn[i])
+ }
+
// cached info about already checked nodes
checkedNodes := newNodeCache()
@@ -73,24 +63,15 @@ func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectc
default:
}
- shortage := policy.ReplicaDescriptor(i).NumberOfObjects()
- if objInfo.Type == objectSDK.TypeLock || objInfo.Type == objectSDK.TypeTombstone || objInfo.IsLinkingObject {
- // all nodes of a container must store the `LOCK`, `TOMBSTONE` and linking objects
- // for correct object removal protection:
- // - `LOCK`, `TOMBSTONE` and linking objects are broadcast on their PUT requests;
- // - `LOCK` object removal is a prohibited action in the GC.
- shortage = uint32(len(nn[i]))
- }
-
- p.processRepNodes(ctx, c, objInfo, nn[i], shortage, checkedNodes)
+ p.processNodes(ctx, c, addrWithType, nn[i], policy.ReplicaNumberByIndex(i), checkedNodes)
}
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
- zap.Stringer("object", objInfo.Address),
+ p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
+ zap.Stringer("object", addr),
)
- p.cbRedundantCopy(ctx, objInfo.Address)
+ p.cbRedundantCopy(ctx, addr)
}
return nil
}
@@ -103,14 +84,22 @@ type placementRequirements struct {
removeLocalCopy bool
}
-func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRequirements, objInfo objectcore.Info,
- nodes []netmap.NodeInfo, shortage uint32, checkedNodes nodeCache,
-) {
- addr := objInfo.Address
+func (p *Policer) processNodes(ctx context.Context, requirements *placementRequirements, addrWithType objectcore.AddressWithType,
+ nodes []netmap.NodeInfo, shortage uint32, checkedNodes nodeCache) {
+ addr := addrWithType.Address
+ typ := addrWithType.Type
// Number of copies that are stored on maintenance nodes.
var uncheckedCopies int
- var candidates []netmap.NodeInfo
+
+ if typ == objectSDK.TypeLock {
+ // all nodes of a container must store the `LOCK` objects
+ // for correct object removal protection:
+ // - `LOCK` objects are broadcast on their PUT requests;
+ // - `LOCK` object removal is a prohibited action in the GC.
+ shortage = uint32(len(nodes))
+ }
+
for i := 0; shortage > 0 && i < len(nodes); i++ {
select {
case <-ctx.Done():
@@ -118,76 +107,78 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
default:
}
- var err error
- st := checkedNodes.processStatus(nodes[i])
- if !st.Processed() {
- st, err = p.checkStatus(ctx, addr, nodes[i])
- checkedNodes.set(nodes[i], st)
- if st == nodeDoesNotHoldObject {
- // 1. This is the first time the node is encountered (`!st.Processed()`).
- // 2. The node does not hold object (`st == nodeDoesNotHoldObject`).
- // So we need to try to put an object to it.
- candidates = append(candidates, nodes[i])
- continue
- }
- }
-
- switch st {
- case nodeIsLocal:
+ if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) {
requirements.needLocalCopy = true
shortage--
- case nodeIsUnderMaintenance:
- shortage--
- uncheckedCopies++
+ } else if nodes[i].IsMaintenance() {
+ shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
+ } else {
+ if status := checkedNodes.processStatus(nodes[i]); status.Processed() {
+ if status == nodeHoldsObject {
+ // node already contains replica, no need to replicate
+ nodes = append(nodes[:i], nodes[i+1:]...)
+ i--
+ shortage--
+ }
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK,
- zap.String("node", netmap.StringifyPublicKey(nodes[i])))
- case nodeHoldsObject:
- shortage--
- case nodeDoesNotHoldObject:
- case nodeStatusUnknown:
- p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
- zap.Stringer("object", addr),
- zap.Error(err))
- default:
- panic("unreachable")
+ continue
+ }
+
+ callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
+
+ _, err := p.remoteHeader(callCtx, nodes[i], addr)
+
+ cancel()
+
+ if client.IsErrObjectNotFound(err) {
+ checkedNodes.submitReplicaCandidate(nodes[i])
+ continue
+ }
+
+ if isClientErrMaintenance(err) {
+ shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
+ } else if err != nil {
+ p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
+ zap.Stringer("object", addr),
+ zap.String("error", err.Error()),
+ )
+ } else {
+ shortage--
+ checkedNodes.submitReplicaHolder(nodes[i])
+ }
}
+
+ nodes = append(nodes[:i], nodes[i+1:]...)
+ i--
}
- p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies)
+ p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies)
}
-func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) {
- if p.netmapKeys.IsLocalKey(node.PublicKey()) {
- return nodeIsLocal, nil
- }
- if node.Status().IsMaintenance() {
- return nodeIsUnderMaintenance, nil
- }
+// handleMaintenance handles node in maintenance mode and returns new shortage and uncheckedCopies values
+//
+// consider remote nodes under maintenance as problem OK. Such
+// nodes MAY not respond with object, however, this is how we
+// prevent spam with new replicas.
+// However, additional copies should not be removed in this case,
+// because we can remove the only copy this way.
+func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
+ checkedNodes.submitReplicaHolder(node)
+ shortage--
+ uncheckedCopies++
- callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
- _, err := p.remoteHeader(callCtx, node, addr, false)
- cancel()
-
- if err == nil {
- return nodeHoldsObject, nil
- }
- if client.IsErrObjectNotFound(err) {
- return nodeDoesNotHoldObject, nil
- }
- if client.IsErrNodeUnderMaintenance(err) {
- return nodeIsUnderMaintenance, nil
- }
- return nodeStatusUnknown, err
+ p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK,
+ zap.String("node", netmap.StringifyPublicKey(node)),
+ )
+ return shortage, uncheckedCopies
}
func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
- nodes []netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int,
-) {
+ nodes []netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) {
switch {
case shortage > 0:
- p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected,
+ p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected,
zap.Stringer("object", addr),
zap.Uint32("shortage", shortage),
)
@@ -198,12 +189,12 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
Nodes: nodes,
}
- p.replicator.HandleReplicationTask(ctx, task, checkedNodes)
+ p.replicator.HandleTask(ctx, task, checkedNodes)
case uncheckedCopies > 0:
// If we have more copies than needed, but some of them are from the maintenance nodes,
// save the local copy.
- p.log.Debug(ctx, logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
+ p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
zap.Int("count", uncheckedCopies))
case uncheckedCopies == 0:
@@ -211,3 +202,27 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
requirements.removeLocalCopy = true
}
}
+
+// isClientErrMaintenance checks if err corresponds to FrostFS status return
+// which tells that node is currently under maintenance. Supports wrapped
+// errors.
+//
+// Similar to client.IsErr___ errors, consider replacing to FrostFS SDK.
+func isClientErrMaintenance(err error) bool {
+ switch unwrapErr(err).(type) {
+ default:
+ return false
+ case
+ *apistatus.NodeUnderMaintenance:
+ return true
+ }
+}
+
+// unwrapErr unwraps error using errors.Unwrap.
+func unwrapErr(err error) error {
+ for e := errors.Unwrap(err); e != nil; e = errors.Unwrap(err) {
+ err = e
+ }
+
+ return err
+}
diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go
index 69879c439..d4c7ccbf9 100644
--- a/pkg/services/policer/check_test.go
+++ b/pkg/services/policer/check_test.go
@@ -16,9 +16,9 @@ func TestNodeCache(t *testing.T) {
cache.SubmitSuccessfulReplication(node)
require.Equal(t, cache.processStatus(node), nodeHoldsObject)
- cache.set(node, nodeDoesNotHoldObject)
+ cache.submitReplicaCandidate(node)
require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject)
- cache.set(node, nodeHoldsObject)
+ cache.submitReplicaHolder(node)
require.Equal(t, cache.processStatus(node), nodeHoldsObject)
}
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
deleted file mode 100644
index fbdeb3148..000000000
--- a/pkg/services/policer/ec.go
+++ /dev/null
@@ -1,395 +0,0 @@
-package policer
-
-import (
- "context"
- "encoding/hex"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
- "golang.org/x/sync/errgroup"
-)
-
-var errNoECinfoReturnded = errors.New("no EC info returned")
-
-type ecChunkProcessResult struct {
- validPlacement bool
- removeLocal bool
-}
-
-var errInvalidECPlacement = errors.New("invalid EC placement: EC placement must have one placement vector with at least one node")
-
-func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
- if objInfo.ECInfo == nil {
- return p.processECContainerRepObject(ctx, objInfo, cnr.PlacementPolicy())
- }
- return p.processECContainerECObject(ctx, objInfo, cnr)
-}
-
-// processECContainerRepObject processes non erasure coded objects in EC container: tombstones, locks and linking objects.
-// All of them must be stored on all of the container nodes.
-func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
- objID := objInfo.Address.Object()
- nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy)
- if err != nil {
- return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
- }
- if len(nn) != 1 || len(nn[0]) == 0 {
- return errInvalidECPlacement
- }
-
- c := &placementRequirements{}
- checkedNodes := newNodeCache()
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes)
-
- if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
- zap.Stringer("object", objInfo.Address),
- )
-
- p.cbRedundantCopy(ctx, objInfo.Address)
- }
- return nil
-}
-
-func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
- nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
- if err != nil {
- return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
- }
- if len(nn) != 1 || len(nn[0]) == 0 {
- return errInvalidECPlacement
- }
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- res := p.processECChunk(ctx, objInfo, nn[0])
- if !res.validPlacement {
- // drop local chunk only if all required chunks are in place
- res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0], cnr)
- }
- p.adjustECPlacement(ctx, objInfo, nn[0], cnr)
-
- if res.removeLocal {
- p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
- p.cbRedundantCopy(ctx, objInfo.Address)
- }
- return nil
-}
-
-// processECChunk replicates EC chunk if needed.
-func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
- var removeLocalChunk bool
- requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
- if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
- // current node is required node, we are happy
- return ecChunkProcessResult{
- validPlacement: true,
- }
- }
- if requiredNode.Status().IsMaintenance() {
- // consider maintenance mode has object, but do not drop local copy
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
- return ecChunkProcessResult{}
- }
-
- callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
- _, err := p.remoteHeader(callCtx, requiredNode, objInfo.Address, false)
- cancel()
-
- if err == nil {
- removeLocalChunk = true
- } else if client.IsErrObjectNotFound(err) {
- p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
- task := replicator.Task{
- NumCopies: 1,
- Addr: objInfo.Address,
- Nodes: []netmap.NodeInfo{requiredNode},
- }
- p.replicator.HandleReplicationTask(ctx, task, newNodeCache())
- } else if client.IsErrNodeUnderMaintenance(err) {
- // consider maintenance mode has object, but do not drop local copy
- p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
- } else {
- p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err))
- }
-
- return ecChunkProcessResult{
- removeLocal: removeLocalChunk,
- }
-}
-
-func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) bool {
- var parentAddress oid.Address
- parentAddress.SetContainer(objInfo.Address.Container())
- parentAddress.SetObject(objInfo.ECInfo.ParentID)
-
- requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo)
- if len(requiredChunkIndexes) == 0 {
- p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
- return true
- }
-
- err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes)
- if err != nil {
- p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
- return false
- }
- if len(requiredChunkIndexes) == 0 {
- return true
- }
-
- indexToObjectID := make(map[uint32]oid.ID)
- success := p.resolveRemoteECChunks(ctx, parentAddress, nodes, requiredChunkIndexes, indexToObjectID)
- if !success {
- return false
- }
-
- for index, candidates := range requiredChunkIndexes {
- var addr oid.Address
- addr.SetContainer(objInfo.Address.Container())
- addr.SetObject(indexToObjectID[index])
- p.replicator.HandlePullTask(ctx, replicator.Task{
- Addr: addr,
- Nodes: candidates,
- Container: cnr,
- })
- }
- // there was some missing chunks, it's not ok
- return false
-}
-
-func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objectcore.Info) map[uint32][]netmap.NodeInfo {
- requiredChunkIndexes := make(map[uint32][]netmap.NodeInfo)
- for i, n := range nodes {
- if uint32(i) == objInfo.ECInfo.Total {
- break
- }
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
- requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
- }
- }
- return requiredChunkIndexes
-}
-
-func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Address, required map[uint32][]netmap.NodeInfo) error {
- _, err := p.localHeader(ctx, parentAddress)
- var eiErr *objectSDK.ECInfoError
- if err == nil { // should not be happen
- return errNoECinfoReturnded
- }
- if !errors.As(err, &eiErr) {
- return err
- }
- for _, ch := range eiErr.ECInfo().Chunks {
- delete(required, ch.Index)
- }
- return nil
-}
-
-func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
- var eiErr *objectSDK.ECInfoError
- for _, n := range nodes {
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
- continue
- }
- _, err := p.remoteHeader(ctx, n, parentAddress, true)
- if !errors.As(err, &eiErr) {
- continue
- }
- for _, ch := range eiErr.ECInfo().Chunks {
- if candidates, ok := required[ch.Index]; ok {
- candidates = append(candidates, n)
- required[ch.Index] = candidates
-
- var chunkID oid.ID
- if err := chunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
- return false
- }
- if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID {
- p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
- zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
- return false
- }
- indexToObjectID[ch.Index] = chunkID
- }
- }
- }
-
- for index, candidates := range required {
- if len(candidates) == 0 {
- p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
- return false
- }
- }
-
- return true
-}
-
-func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) {
- var parentAddress oid.Address
- parentAddress.SetContainer(objInfo.Address.Container())
- parentAddress.SetObject(objInfo.ECInfo.ParentID)
- var eiErr *objectSDK.ECInfoError
- resolved := make(map[uint32][]netmap.NodeInfo)
- chunkIDs := make(map[uint32]oid.ID)
- restore := true // do not restore EC chunks if some node returned error
- for idx, n := range nodes {
- if uint32(idx) >= objInfo.ECInfo.Total && uint32(len(resolved)) == objInfo.ECInfo.Total {
- return
- }
- var err error
- if p.netmapKeys.IsLocalKey(n.PublicKey()) {
- _, err = p.localHeader(ctx, parentAddress)
- } else {
- _, err = p.remoteHeader(ctx, n, parentAddress, true)
- }
-
- if errors.As(err, &eiErr) {
- for _, ch := range eiErr.ECInfo().Chunks {
- resolved[ch.Index] = append(resolved[ch.Index], n)
- var ecInfoChunkID oid.ID
- if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
- return
- }
- if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID {
- p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
- zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
- return
- }
- chunkIDs[ch.Index] = ecInfoChunkID
- }
- } else if client.IsErrObjectAlreadyRemoved(err) {
- restore = false
- } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
- p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
- p.replicator.HandleReplicationTask(ctx, replicator.Task{
- NumCopies: 1,
- Addr: objInfo.Address,
- Nodes: []netmap.NodeInfo{n},
- }, newNodeCache())
- restore = false
- }
- }
- if !restore || uint32(len(resolved)) == objInfo.ECInfo.Total {
- return
- }
- if objInfo.ECInfo.Total-uint32(len(resolved)) > cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount() {
- var found []uint32
- for i := range resolved {
- found = append(found, i)
- }
- p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
- return
- }
- p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr)
-}
-
-func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID,
- cnr containerSDK.Container,
-) {
- c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount()))
- if err != nil {
- p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
- return
- }
- parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs)
- if parts == nil {
- return
- }
- key, err := p.keyStorage.GetKey(nil)
- if err != nil {
- p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
- return
- }
- required := make([]bool, len(parts))
- for i, p := range parts {
- if p == nil {
- required[i] = true
- }
- }
- if err := c.ReconstructParts(parts, required, key); err != nil {
- p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
- return
- }
- for idx, part := range parts {
- if _, exists := existedChunks[uint32(idx)]; exists {
- continue
- }
- var addr oid.Address
- addr.SetContainer(parentAddress.Container())
- pID, _ := part.ID()
- addr.SetObject(pID)
- targetNode := nodes[idx%len(nodes)]
- if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
- p.replicator.HandleLocalPutTask(ctx, replicator.Task{
- Addr: addr,
- Obj: part,
- Container: cnr,
- })
- } else {
- p.replicator.HandleReplicationTask(ctx, replicator.Task{
- NumCopies: 1,
- Addr: addr,
- Nodes: []netmap.NodeInfo{targetNode},
- Obj: part,
- }, newNodeCache())
- }
- }
-}
-
-func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.Info, existedChunks map[uint32][]netmap.NodeInfo, parentAddress oid.Address, chunkIDs map[uint32]oid.ID) []*objectSDK.Object {
- parts := make([]*objectSDK.Object, objInfo.ECInfo.Total)
- errGroup, egCtx := errgroup.WithContext(ctx)
- for idx, nodes := range existedChunks {
- errGroup.Go(func() error {
- var objID oid.Address
- objID.SetContainer(parentAddress.Container())
- objID.SetObject(chunkIDs[idx])
- var obj *objectSDK.Object
- var err error
- for _, node := range nodes {
- if p.netmapKeys.IsLocalKey(node.PublicKey()) {
- obj, err = p.localObject(egCtx, objID)
- } else {
- obj, err = p.remoteObject(egCtx, node, objID)
- }
- if err == nil {
- break
- }
- p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
- }
- if obj != nil {
- parts[idx] = obj
- }
- return nil
- })
- }
- if err := errGroup.Wait(); err != nil {
- p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
- return nil
- }
- return parts
-}
diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go
deleted file mode 100644
index c6980536b..000000000
--- a/pkg/services/policer/ec_test.go
+++ /dev/null
@@ -1,710 +0,0 @@
-package policer
-
-import (
- "bytes"
- "context"
- "crypto/rand"
- "errors"
- "fmt"
- "sync/atomic"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-func TestECChunkHasValidPlacement(t *testing.T) {
- t.Parallel()
- chunkAddress := oidtest.Address()
- parentID := oidtest.ID()
-
- var policy netmapSDK.PlacementPolicy
- require.NoError(t, policy.DecodeString("EC 2.1"))
-
- cnr := &container.Container{}
- cnr.Value.Init()
- cnr.Value.SetPlacementPolicy(policy)
- containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
- if id.Equals(chunkAddress.Container()) {
- return cnr, nil
- }
- return nil, new(apistatus.ContainerNotFound)
- },
- }
-
- nodes := make([]netmapSDK.NodeInfo, 4)
- for i := range nodes {
- nodes[i].SetPublicKey([]byte{byte(i)})
- }
-
- placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- if cnr.Equals(chunkAddress.Container()) && obj.Equals(parentID) {
- return [][]netmapSDK.NodeInfo{nodes}, nil
- }
- return nil, errors.New("unexpected placement build")
- }
-
- remoteHeadFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
- require.True(t, raw, "remote header for parent object must be called with raw flag")
- index := int(ni.PublicKey()[0])
- require.True(t, index == 1 || index == 2, "invalid node to get parent header")
- require.True(t, a.Container() == chunkAddress.Container() && a.Object() == parentID, "invalid address to get remote header")
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(oidtest.ID())
- ch.Index = uint32(index)
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
-
- localHeadFn := func(_ context.Context, a oid.Address) (*objectSDK.Object, error) {
- require.True(t, a.Container() == chunkAddress.Container() && a.Object() == parentID, "invalid address to get remote header")
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(oidtest.ID())
- ch.Index = uint32(0)
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
-
- p := New(
- WithContainerSource(containerSrc),
- WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
- WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
- return bytes.Equal(k, nodes[0].PublicKey())
- })),
- WithRemoteObjectHeaderFunc(remoteHeadFn),
- WithLocalObjectHeaderFunc(localHeadFn),
- WithPool(testPool(t)),
- )
-
- objInfo := objectcore.Info{
- Address: chunkAddress,
- Type: objectSDK.TypeRegular,
- ECInfo: &objectcore.ECInfo{
- ParentID: parentID,
- Index: 0,
- Total: 3,
- },
- }
- err := p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
-}
-
-func TestECChunkHasInvalidPlacement(t *testing.T) {
- t.Parallel()
- chunkAddress := oidtest.Address()
- parentID := oidtest.ID()
- chunkObject := objectSDK.New()
- chunkObject.SetContainerID(chunkAddress.Container())
- chunkObject.SetID(chunkAddress.Object())
- chunkObject.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
- chunkObject.SetPayloadSize(uint64(10))
- chunkObject.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: parentID}, 0, 3, []byte{}, 0))
-
- var policy netmapSDK.PlacementPolicy
- require.NoError(t, policy.DecodeString("EC 2.1"))
-
- cnr := &container.Container{}
- cnr.Value.Init()
- cnr.Value.SetPlacementPolicy(policy)
- containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
- if id.Equals(chunkAddress.Container()) {
- return cnr, nil
- }
- return nil, new(apistatus.ContainerNotFound)
- },
- }
-
- nodes := make([]netmapSDK.NodeInfo, 4)
- for i := range nodes {
- nodes[i].SetPublicKey([]byte{byte(i)})
- }
-
- placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- if cnr.Equals(chunkAddress.Container()) && obj.Equals(parentID) {
- return [][]netmapSDK.NodeInfo{nodes}, nil
- }
- return nil, errors.New("unexpected placement build")
- }
-
- objInfo := objectcore.Info{
- Address: chunkAddress,
- Type: objectSDK.TypeRegular,
- ECInfo: &objectcore.ECInfo{
- ParentID: parentID,
- Index: 1,
- Total: 3,
- },
- }
-
- t.Run("node0 has chunk1, node1 has chunk0 and chunk1", func(t *testing.T) {
- // policer should pull chunk0 on first run and drop chunk1 on second run
- var allowDrop bool
- requiredChunkID := oidtest.ID()
- headFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
- if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a == chunkAddress && !raw {
- return chunkObject, nil
- }
- if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(oidtest.ID())
- ch.Index = 1
- ch.Total = 3
- ei.AddChunk(ch)
- ch.Index = 0
- ch.SetID(requiredChunkID)
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- if bytes.Equal(ni.PublicKey(), nodes[2].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(oidtest.ID())
- ch.Index = 2
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- if bytes.Equal(ni.PublicKey(), nodes[3].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- return nil, new(apistatus.ObjectNotFound)
- }
- require.Fail(t, "unexpected remote HEAD")
- return nil, fmt.Errorf("unexpected remote HEAD")
- }
-
- localHeadF := func(_ context.Context, addr oid.Address) (*objectSDK.Object, error) {
- require.True(t, addr.Container() == chunkAddress.Container() && addr.Object() == parentID, "unexpected local HEAD")
- if allowDrop {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(oidtest.ID())
- ch.Index = 1
- ch.Total = 3
- ei.AddChunk(ch)
- ch.SetID(requiredChunkID)
- ch.Index = 0
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(oidtest.ID())
- ch.Index = 1
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
-
- var pullCounter atomic.Int64
- var dropped []oid.Address
- p := New(
- WithContainerSource(containerSrc),
- WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
- WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
- return bytes.Equal(k, nodes[0].PublicKey())
- })),
- WithRemoteObjectHeaderFunc(headFn),
- WithLocalObjectHeaderFunc(localHeadF),
- WithReplicator(&testReplicator{
- handlePullTask: (func(ctx context.Context, r replicator.Task) {
- require.True(t, r.Addr.Container() == chunkAddress.Container() && r.Addr.Object() == requiredChunkID &&
- len(r.Nodes) == 1 && bytes.Equal(r.Nodes[0].PublicKey(), nodes[1].PublicKey()), "invalid pull task")
- pullCounter.Add(1)
- }),
- }),
- WithRedundantCopyCallback(func(ctx context.Context, a oid.Address) {
- require.True(t, allowDrop, "invalid redundent copy call")
- dropped = append(dropped, a)
- }),
- WithPool(testPool(t)),
- )
-
- err := p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
- require.Equal(t, int64(1), pullCounter.Load(), "invalid pull count")
- require.Equal(t, 0, len(dropped), "invalid dropped count")
- allowDrop = true
- err = p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
- require.Equal(t, int64(1), pullCounter.Load(), "invalid pull count")
- require.Equal(t, 1, len(dropped), "invalid dropped count")
- require.True(t, chunkAddress.Equals(dropped[0]), "invalid dropped object")
- })
-
- t.Run("node0 has chunk0 and chunk1, node1 has chunk1", func(t *testing.T) {
- // policer should drop chunk1
- headFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
- if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a == chunkAddress && !raw {
- return chunkObject, nil
- }
- if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkAddress.Object())
- ch.Index = 1
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- if bytes.Equal(ni.PublicKey(), nodes[2].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(oidtest.ID())
- ch.Index = 2
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- if bytes.Equal(ni.PublicKey(), nodes[3].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- return nil, new(apistatus.ObjectNotFound)
- }
- require.Fail(t, "unexpected remote HEAD")
- return nil, fmt.Errorf("unexpected remote HEAD")
- }
-
- localHeadF := func(_ context.Context, addr oid.Address) (*objectSDK.Object, error) {
- require.True(t, addr.Container() == chunkAddress.Container() && addr.Object() == parentID, "unexpected local HEAD")
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkAddress.Object())
- ch.Index = 1
- ch.Total = 3
- ei.AddChunk(ch)
- ch.SetID(oidtest.ID())
- ch.Index = 0
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
-
- var dropped []oid.Address
- p := New(
- WithContainerSource(containerSrc),
- WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
- WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
- return bytes.Equal(k, nodes[0].PublicKey())
- })),
- WithRemoteObjectHeaderFunc(headFn),
- WithLocalObjectHeaderFunc(localHeadF),
- WithRedundantCopyCallback(func(ctx context.Context, a oid.Address) {
- dropped = append(dropped, a)
- }),
- WithPool(testPool(t)),
- )
-
- err := p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
- require.Equal(t, 1, len(dropped), "invalid dropped count")
- require.True(t, chunkAddress.Equals(dropped[0]), "invalid dropped object")
- })
-
- t.Run("node0 has chunk0 and chunk1, node1 has no chunks", func(t *testing.T) {
- // policer should replicate chunk1 to node1 on first run and drop chunk1 on node0 on second run
- var secondRun bool
- headFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
- if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a == chunkAddress && !raw {
- if !secondRun {
- return nil, new(apistatus.ObjectNotFound)
- }
- return chunkObject, nil
- }
- if bytes.Equal(ni.PublicKey(), nodes[1].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkAddress.Object())
- ch.Index = 1
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- if bytes.Equal(ni.PublicKey(), nodes[2].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(oidtest.ID())
- ch.Index = 2
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- if bytes.Equal(ni.PublicKey(), nodes[3].PublicKey()) && a.Container() == chunkAddress.Container() &&
- a.Object() == parentID && raw {
- return nil, new(apistatus.ObjectNotFound)
- }
- require.Fail(t, "unexpected remote HEAD")
- return nil, fmt.Errorf("unexpected remote HEAD")
- }
-
- localHeadF := func(_ context.Context, addr oid.Address) (*objectSDK.Object, error) {
- require.True(t, addr.Container() == chunkAddress.Container() && addr.Object() == parentID, "unexpected local HEAD")
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkAddress.Object())
- ch.Index = 1
- ch.Total = 3
- ei.AddChunk(ch)
- ch.SetID(oidtest.ID())
- ch.Index = 0
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
-
- var dropped []oid.Address
- var replicated []replicator.Task
- p := New(
- WithContainerSource(containerSrc),
- WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
- WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
- return bytes.Equal(k, nodes[0].PublicKey())
- })),
- WithRemoteObjectHeaderFunc(headFn),
- WithLocalObjectHeaderFunc(localHeadF),
- WithRedundantCopyCallback(func(ctx context.Context, a oid.Address) {
- dropped = append(dropped, a)
- }),
- WithReplicator(&testReplicator{
- handleReplicationTask: func(ctx context.Context, t replicator.Task, tr replicator.TaskResult) {
- replicated = append(replicated, t)
- },
- }),
- WithPool(testPool(t)),
- )
-
- err := p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
- require.Equal(t, 0, len(dropped), "invalid dropped count")
- require.Equal(t, 1, len(replicated), "invalid replicated count")
- require.Equal(t, chunkAddress, replicated[0].Addr, "invalid replicated object")
- require.True(t, bytes.Equal(replicated[0].Nodes[0].PublicKey(), nodes[1].PublicKey()), "invalid replicate target")
-
- secondRun = true
- err = p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
- require.Equal(t, 1, len(replicated), "invalid replicated count")
- require.Equal(t, chunkAddress, replicated[0].Addr, "invalid replicated object")
- require.True(t, bytes.Equal(replicated[0].Nodes[0].PublicKey(), nodes[1].PublicKey()), "invalid replicate target")
- require.Equal(t, 1, len(dropped), "invalid dropped count")
- require.True(t, chunkAddress.Equals(dropped[0]), "invalid dropped object")
- })
-}
-
-func TestECChunkRestore(t *testing.T) {
- // node0 has chunk0, node1 has chunk1
- // policer should replicate chunk0 to node2 on the first run
- // then restore EC object and replicate chunk2 to node2 on the second run
- t.Parallel()
-
- payload := make([]byte, 64)
- rand.Read(payload)
- parentAddress := oidtest.Address()
- parentObject := objectSDK.New()
- parentObject.SetContainerID(parentAddress.Container())
- parentObject.SetPayload(payload)
- parentObject.SetPayloadSize(64)
- objectSDK.CalculateAndSetPayloadChecksum(parentObject)
- err := objectSDK.CalculateAndSetID(parentObject)
- require.NoError(t, err)
- id, _ := parentObject.ID()
- parentAddress.SetObject(id)
-
- chunkIDs := make([]oid.ID, 3)
- c, err := erasurecode.NewConstructor(2, 1)
- require.NoError(t, err)
- key, err := keys.NewPrivateKey()
- require.NoError(t, err)
- chunks, err := c.Split(parentObject, &key.PrivateKey)
- require.NoError(t, err)
- for i, ch := range chunks {
- chunkIDs[i], _ = ch.ID()
- }
-
- var policy netmapSDK.PlacementPolicy
- require.NoError(t, policy.DecodeString("EC 2.1"))
-
- cnr := &container.Container{}
- cnr.Value.Init()
- cnr.Value.SetPlacementPolicy(policy)
- containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
- if id.Equals(parentAddress.Container()) {
- return cnr, nil
- }
- return nil, new(apistatus.ContainerNotFound)
- },
- }
-
- nodes := make([]netmapSDK.NodeInfo, 4)
- for i := range nodes {
- nodes[i].SetPublicKey([]byte{byte(i)})
- }
-
- placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- if cnr.Equals(parentAddress.Container()) && obj.Equals(parentAddress.Object()) {
- return [][]netmapSDK.NodeInfo{nodes}, nil
- }
- return nil, errors.New("unexpected placement build")
- }
- var secondRun bool
- remoteHeadFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
- require.True(t, raw, "remote header for parent object must be called with raw flag")
- index := int(ni.PublicKey()[0])
- require.True(t, index == 1 || index == 2 || index == 3, "invalid node to get parent header")
- require.True(t, a == parentAddress, "invalid address to get remote header")
- if index == 1 {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkIDs[1])
- ch.Index = uint32(1)
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- if index == 2 && secondRun {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkIDs[0])
- ch.Index = uint32(0)
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- return nil, new(apistatus.ObjectNotFound)
- }
-
- localHeadFn := func(_ context.Context, a oid.Address) (*objectSDK.Object, error) {
- require.True(t, a == parentAddress, "invalid address to get remote header")
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkIDs[0])
- ch.Index = uint32(0)
- ch.Total = 3
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
-
- var replicatedObj []*objectSDK.Object
- p := New(
- WithContainerSource(containerSrc),
- WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
- WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
- return bytes.Equal(k, nodes[0].PublicKey())
- })),
- WithRemoteObjectHeaderFunc(remoteHeadFn),
- WithLocalObjectHeaderFunc(localHeadFn),
- WithReplicator(&testReplicator{
- handleReplicationTask: func(ctx context.Context, t replicator.Task, tr replicator.TaskResult) {
- if t.Obj != nil {
- replicatedObj = append(replicatedObj, t.Obj)
- }
- },
- }),
- WithLocalObjectGetFunc(func(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
- require.True(t, a.Container() == parentAddress.Container() && a.Object() == chunkIDs[0], "invalid local object request")
- return chunks[0], nil
- }),
- WithRemoteObjectGetFunc(func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address) (*objectSDK.Object, error) {
- index := ni.PublicKey()[0]
- if index == 2 {
- return nil, new(apistatus.ObjectNotFound)
- }
- return chunks[index], nil
- }),
- WithPool(testPool(t)),
- WithKeyStorage(util.NewKeyStorage(&key.PrivateKey, nil, nil)),
- )
-
- var chunkAddress oid.Address
- chunkAddress.SetContainer(parentAddress.Container())
- chunkAddress.SetObject(chunkIDs[0])
- objInfo := objectcore.Info{
- Address: chunkAddress,
- Type: objectSDK.TypeRegular,
- ECInfo: &objectcore.ECInfo{
- ParentID: parentAddress.Object(),
- Index: 0,
- Total: 3,
- },
- }
- err = p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
- secondRun = true
- err = p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
-
- require.Equal(t, 1, len(replicatedObj), "invalid replicated objects count")
- chunks[2].SetSignature(nil)
- expectedData, err := chunks[2].MarshalJSON()
- require.NoError(t, err)
- replicatedObj[0].SetSignature(nil)
- actualData, err := replicatedObj[0].MarshalJSON()
- require.NoError(t, err)
- require.EqualValues(t, string(expectedData), string(actualData), "invalid restored objects")
-}
-
-func TestECChunkRestoreNodeOff(t *testing.T) {
- // node0 has chunk0, node1 has chunk1, node2 has chunk2, node3 is out of netmap
- t.Parallel()
-
- payload := make([]byte, 64)
- rand.Read(payload)
- parentAddress := oidtest.Address()
- parentObject := objectSDK.New()
- parentObject.SetContainerID(parentAddress.Container())
- parentObject.SetPayload(payload)
- parentObject.SetPayloadSize(64)
- objectSDK.CalculateAndSetPayloadChecksum(parentObject)
- err := objectSDK.CalculateAndSetID(parentObject)
- require.NoError(t, err)
- id, _ := parentObject.ID()
- parentAddress.SetObject(id)
-
- chunkIDs := make([]oid.ID, 4)
- c, err := erasurecode.NewConstructor(3, 1)
- require.NoError(t, err)
- key, err := keys.NewPrivateKey()
- require.NoError(t, err)
- chunks, err := c.Split(parentObject, &key.PrivateKey)
- require.NoError(t, err)
- for i, ch := range chunks {
- chunkIDs[i], _ = ch.ID()
- }
-
- var policy netmapSDK.PlacementPolicy
- require.NoError(t, policy.DecodeString("EC 3.1"))
-
- cnr := &container.Container{}
- cnr.Value.Init()
- cnr.Value.SetPlacementPolicy(policy)
- containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
- if id.Equals(parentAddress.Container()) {
- return cnr, nil
- }
- return nil, new(apistatus.ContainerNotFound)
- },
- }
-
- nodes := make([]netmapSDK.NodeInfo, 3)
- for i := range nodes {
- nodes[i].SetPublicKey([]byte{byte(i)})
- }
-
- placementBuilder := func(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
- if cnr.Equals(parentAddress.Container()) && obj.Equals(parentAddress.Object()) {
- return [][]netmapSDK.NodeInfo{nodes}, nil
- }
- return nil, errors.New("unexpected placement build")
- }
- remoteHeadFn := func(_ context.Context, ni netmapSDK.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
- require.True(t, raw, "remote header for parent object must be called with raw flag")
- index := int(ni.PublicKey()[0])
- require.True(t, index == 1 || index == 2, "invalid node to get parent header")
- require.True(t, a == parentAddress, "invalid address to get remote header")
- if index == 1 {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkIDs[1])
- ch.Index = uint32(1)
- ch.Total = 4
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
- if index == 2 {
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkIDs[2])
- ch.Index = uint32(2)
- ch.Total = 4
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
-
- return nil, new(apistatus.ObjectNotFound)
- }
-
- localHeadFn := func(_ context.Context, a oid.Address) (*objectSDK.Object, error) {
- require.True(t, a == parentAddress, "invalid address to get remote header")
- ei := objectSDK.NewECInfo()
- var ch objectSDK.ECChunk
- ch.SetID(chunkIDs[0])
- ch.Index = uint32(0)
- ch.Total = 4
- ei.AddChunk(ch)
- return nil, objectSDK.NewECInfoError(ei)
- }
-
- var replicatedObj []*objectSDK.Object
- p := New(
- WithContainerSource(containerSrc),
- WithPlacementBuilder(placementBuilderFunc(placementBuilder)),
- WithNetmapKeys(announcedKeysFunc(func(k []byte) bool {
- return bytes.Equal(k, nodes[0].PublicKey())
- })),
- WithRemoteObjectHeaderFunc(remoteHeadFn),
- WithLocalObjectHeaderFunc(localHeadFn),
- WithReplicator(&testReplicator{
- handleLocalPutTask: func(ctx context.Context, task replicator.Task) {
- if task.Obj != nil {
- replicatedObj = append(replicatedObj, task.Obj)
- }
- },
- }),
- WithLocalObjectGetFunc(func(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
- require.True(t, a.Container() == parentAddress.Container() && a.Object() == chunkIDs[0], "invalid local object request")
- return chunks[0], nil
- }),
- WithRemoteObjectGetFunc(func(ctx context.Context, ni netmapSDK.NodeInfo, a oid.Address) (*objectSDK.Object, error) {
- index := ni.PublicKey()[0]
- return chunks[index], nil
- }),
- WithPool(testPool(t)),
- WithKeyStorage(util.NewKeyStorage(&key.PrivateKey, nil, nil)),
- )
-
- var chunkAddress oid.Address
- chunkAddress.SetContainer(parentAddress.Container())
- chunkAddress.SetObject(chunkIDs[0])
- objInfo := objectcore.Info{
- Address: chunkAddress,
- Type: objectSDK.TypeRegular,
- ECInfo: &objectcore.ECInfo{
- ParentID: parentAddress.Object(),
- Index: 0,
- Total: 4,
- },
- }
- err = p.processObject(context.Background(), objInfo)
- require.NoError(t, err)
-
- require.Equal(t, 1, len(replicatedObj), "invalid replicated objects count")
- chunks[3].SetSignature(nil)
- expectedData, err := chunks[3].MarshalJSON()
- require.NoError(t, err)
- replicatedObj[0].SetSignature(nil)
- actualData, err := replicatedObj[0].MarshalJSON()
- require.NoError(t, err)
- require.EqualValues(t, string(expectedData), string(actualData), "invalid restored objects")
-}
diff --git a/pkg/services/policer/metrics.go b/pkg/services/policer/metrics.go
deleted file mode 100644
index c2ad2b0b5..000000000
--- a/pkg/services/policer/metrics.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package policer
-
-type MetricsRegister interface {
- IncProcessedObjects()
-}
-
-type noopMetrics struct{}
-
-func (noopMetrics) IncProcessedObjects() {}
diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go
index c2157de5d..cd47cb0fc 100644
--- a/pkg/services/policer/nodecache.go
+++ b/pkg/services/policer/nodecache.go
@@ -8,9 +8,6 @@ const (
nodeNotProcessed nodeProcessStatus = iota
nodeDoesNotHoldObject
nodeHoldsObject
- nodeStatusUnknown
- nodeIsUnderMaintenance
- nodeIsLocal
)
func (st nodeProcessStatus) Processed() bool {
@@ -18,19 +15,37 @@ func (st nodeProcessStatus) Processed() bool {
}
// nodeCache tracks Policer's check progress.
-type nodeCache map[uint64]nodeProcessStatus
+type nodeCache map[uint64]bool
func newNodeCache() nodeCache {
- return make(map[uint64]nodeProcessStatus)
+ return make(map[uint64]bool)
}
-func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) {
+func (n nodeCache) set(node netmap.NodeInfo, val bool) {
n[node.Hash()] = val
}
+// submits storage node as a candidate to store the object replica in case of
+// shortage.
+func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) {
+ n.set(node, false)
+}
+
+// submits storage node as a current object replica holder.
+func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) {
+ n.set(node, true)
+}
+
// processStatus returns current processing status of the storage node.
func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
- return n[node.Hash()]
+ switch val, ok := n[node.Hash()]; {
+ case !ok:
+ return nodeNotProcessed
+ case val:
+ return nodeHoldsObject
+ default:
+ return nodeDoesNotHoldObject
+ }
}
// SubmitSuccessfulReplication marks given storage node as a current object
@@ -38,5 +53,5 @@ func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
//
// SubmitSuccessfulReplication implements replicator.TaskResult.
func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) {
- n.set(node, nodeHoldsObject)
+ n.submitReplicaHolder(node)
}
diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go
index 5d59604c2..5058b026b 100644
--- a/pkg/services/policer/option.go
+++ b/pkg/services/policer/option.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -23,7 +22,7 @@ import (
// Note that the underlying implementation might be circular: i.e. it can restart
// when the end of the key space is reached.
type KeySpaceIterator interface {
- Next(context.Context, uint32) ([]objectcore.Info, error)
+ Next(context.Context, uint32) ([]objectcore.AddressWithType, error)
Rewind()
}
@@ -36,20 +35,11 @@ type BuryFunc func(context.Context, oid.Address) error
// Replicator is the interface to a consumer of replication tasks.
type Replicator interface {
- HandleReplicationTask(ctx context.Context, task replicator.Task, res replicator.TaskResult)
- HandlePullTask(ctx context.Context, task replicator.Task)
- HandleLocalPutTask(ctx context.Context, task replicator.Task)
+ HandleTask(ctx context.Context, task replicator.Task, res replicator.TaskResult)
}
// RemoteObjectHeaderFunc is the function to obtain HEAD info from a specific remote node.
-type RemoteObjectHeaderFunc func(context.Context, netmapSDK.NodeInfo, oid.Address, bool) (*objectSDK.Object, error)
-
-// LocalObjectHeaderFunc is the function to obtain HEAD info from the current node.
-type LocalObjectHeaderFunc func(context.Context, oid.Address) (*objectSDK.Object, error)
-
-type RemoteObjectGetFunc func(context.Context, netmapSDK.NodeInfo, oid.Address) (*objectSDK.Object, error)
-
-type LocalObjectGetFunc func(context.Context, oid.Address) (*objectSDK.Object, error)
+type RemoteObjectHeaderFunc func(context.Context, netmapSDK.NodeInfo, oid.Address) (*objectSDK.Object, error)
type cfg struct {
headTimeout time.Duration
@@ -66,8 +56,6 @@ type cfg struct {
remoteHeader RemoteObjectHeaderFunc
- localHeader LocalObjectHeaderFunc
-
netmapKeys netmap.AnnouncedKeys
replicator Replicator
@@ -76,27 +64,21 @@ type cfg struct {
taskPool *ants.Pool
+ maxCapacity int
+
batchSize, cacheSize uint32
- evictDuration, sleepDuration time.Duration
-
- metrics MetricsRegister
-
- remoteObject RemoteObjectGetFunc
-
- localObject LocalObjectGetFunc
-
- keyStorage *util.KeyStorage
+ rebalanceFreq, evictDuration, sleepDuration time.Duration
}
func defaultCfg() *cfg {
return &cfg{
- log: logger.NewLoggerWrapper(zap.L()),
+ log: &logger.Logger{Logger: zap.L()},
batchSize: 10,
cacheSize: 1024, // 1024 * address size = 1024 * 64 = 64 MiB
+ rebalanceFreq: 1 * time.Second,
sleepDuration: 1 * time.Second,
evictDuration: 30 * time.Second,
- metrics: noopMetrics{},
}
}
@@ -143,32 +125,13 @@ func WithPlacementBuilder(v placement.Builder) Option {
}
}
-// WithRemoteObjectHeaderFunc returns option to set remote object header receiver of Policer.
+// WithRemoteObjectHeader returns option to set object header receiver of Policer.
func WithRemoteObjectHeaderFunc(v RemoteObjectHeaderFunc) Option {
return func(c *cfg) {
c.remoteHeader = v
}
}
-// WithLocalObjectHeaderFunc returns option to set local object header receiver of Policer.
-func WithLocalObjectHeaderFunc(v LocalObjectHeaderFunc) Option {
- return func(c *cfg) {
- c.localHeader = v
- }
-}
-
-func WithRemoteObjectGetFunc(v RemoteObjectGetFunc) Option {
- return func(c *cfg) {
- c.remoteObject = v
- }
-}
-
-func WithLocalObjectGetFunc(v LocalObjectGetFunc) Option {
- return func(c *cfg) {
- c.localObject = v
- }
-}
-
// WithNetmapKeys returns option to set tool to work with announced public keys.
func WithNetmapKeys(v netmap.AnnouncedKeys) Option {
return func(c *cfg) {
@@ -192,6 +155,14 @@ func WithRedundantCopyCallback(cb RedundantCopyCallback) Option {
}
}
+// WithMaxCapacity returns option to set max capacity
+// that can be set to the pool.
+func WithMaxCapacity(capacity int) Option {
+ return func(c *cfg) {
+ c.maxCapacity = capacity
+ }
+}
+
// WithPool returns option to set pool for
// policy and replication operations.
func WithPool(p *ants.Pool) Option {
@@ -199,16 +170,3 @@ func WithPool(p *ants.Pool) Option {
c.taskPool = p
}
}
-
-// WithMetrics returns option to set metrics.
-func WithMetrics(m MetricsRegister) Option {
- return func(c *cfg) {
- c.metrics = m
- }
-}
-
-func WithKeyStorage(ks *util.KeyStorage) Option {
- return func(c *cfg) {
- c.keyStorage = ks
- }
-}
diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go
index c91e7cc7c..a68b194d4 100644
--- a/pkg/services/policer/policer.go
+++ b/pkg/services/policer/policer.go
@@ -1,13 +1,13 @@
package policer
import (
- "fmt"
"sync"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru/v2"
+ "go.uber.org/zap"
)
type objectsInWork struct {
@@ -55,14 +55,18 @@ func New(opts ...Option) *Policer {
opts[i](c)
}
+ c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Policer"))}
+
cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
- assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize))
+ if err != nil {
+ panic(err)
+ }
return &Policer{
cfg: c,
cache: cache,
objsInWork: &objectsInWork{
- objs: make(map[oid.Address]struct{}, c.taskPool.Cap()),
+ objs: make(map[oid.Address]struct{}, c.maxCapacity),
},
}
}
diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go
index 049c33753..c73d33629 100644
--- a/pkg/services/policer/policer_test.go
+++ b/pkg/services/policer/policer_test.go
@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"errors"
- "slices"
"sort"
"testing"
"time"
@@ -27,7 +26,7 @@ import (
func TestBuryObjectWithoutContainer(t *testing.T) {
// Key space
addr := oidtest.Address()
- objs := []objectcore.Info{
+ objs := []objectcore.AddressWithType{
{
Address: addr,
Type: objectSDK.TypeRegular,
@@ -37,10 +36,10 @@ func TestBuryObjectWithoutContainer(t *testing.T) {
// Container source and bury function
buryCh := make(chan oid.Address)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -49,12 +48,16 @@ func TestBuryObjectWithoutContainer(t *testing.T) {
return nil
}
+ // Task pool
+ pool, err := ants.NewPool(4)
+ require.NoError(t, err)
+
// Policer instance
p := New(
WithKeySpaceIterator(&sliceKeySpaceIterator{objs: objs}),
WithContainerSource(containerSrc),
WithBuryFunc(buryFn),
- WithPool(testPool(t)),
+ WithPool(pool),
)
ctx, cancel := context.WithCancel(context.Background())
@@ -79,8 +82,6 @@ func TestProcessObject(t *testing.T) {
maintenanceNodes []int
wantRemoveRedundant bool
wantReplicateTo []int
- headResult map[int]error
- ecInfo *objectcore.ECInfo
}{
{
desc: "1 copy already held by local node",
@@ -129,7 +130,7 @@ func TestProcessObject(t *testing.T) {
nodeCount: 2,
policy: `REP 2 REP 2`,
placement: [][]int{{0, 1}, {0, 1}},
- wantReplicateTo: []int{1},
+ wantReplicateTo: []int{1, 1}, // is this actually good?
},
{
desc: "lock object must be replicated to all nodes",
@@ -147,38 +148,6 @@ func TestProcessObject(t *testing.T) {
objHolders: []int{1},
maintenanceNodes: []int{2},
},
- {
- desc: "preserve local copy when node response with MAINTENANCE",
- nodeCount: 3,
- policy: `REP 2`,
- placement: [][]int{{1, 2}},
- objHolders: []int{1},
- headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)},
- },
- {
- desc: "lock object must be replicated to all EC nodes",
- objType: objectSDK.TypeLock,
- nodeCount: 3,
- policy: `EC 1.1`,
- placement: [][]int{{0, 1, 2}},
- wantReplicateTo: []int{1, 2},
- },
- {
- desc: "tombstone object must be replicated to all EC nodes",
- objType: objectSDK.TypeTombstone,
- nodeCount: 3,
- policy: `EC 1.1`,
- placement: [][]int{{0, 1, 2}},
- wantReplicateTo: []int{1, 2},
- },
- {
- desc: "do not remove local copy when MAINTENANCE status is cached",
- objType: objectSDK.TypeRegular,
- nodeCount: 3,
- policy: `REP 1 REP 1`,
- placement: [][]int{{1, 2}, {1, 0}},
- headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)},
- },
}
for i := range tests {
@@ -192,7 +161,7 @@ func TestProcessObject(t *testing.T) {
nodes[i].SetPublicKey([]byte{byte(i)})
}
for _, i := range ti.maintenanceNodes {
- nodes[i].SetStatus(netmap.Maintenance)
+ nodes[i].SetMaintenance()
}
var policy netmap.PlacementPolicy
@@ -208,28 +177,22 @@ func TestProcessObject(t *testing.T) {
if cnr.Equals(addr.Container()) && obj != nil && obj.Equals(addr.Object()) {
return placementVectors, nil
}
- if ti.ecInfo != nil && cnr.Equals(addr.Container()) && obj != nil && obj.Equals(ti.ecInfo.ParentID) {
- return placementVectors, nil
- }
t.Errorf("unexpected placement build: cid=%v oid=%v", cnr, obj)
return nil, errors.New("unexpected placement build")
}
// Object remote header
- headFn := func(_ context.Context, ni netmap.NodeInfo, a oid.Address, raw bool) (*objectSDK.Object, error) {
+ headFn := func(_ context.Context, ni netmap.NodeInfo, a oid.Address) (*objectSDK.Object, error) {
index := int(ni.PublicKey()[0])
if a != addr || index < 1 || index >= ti.nodeCount {
t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a)
return nil, errors.New("unexpected object head")
}
- if ti.headResult != nil {
- if err, ok := ti.headResult[index]; ok {
- return nil, err
+ for _, i := range ti.objHolders {
+ if index == i {
+ return nil, nil
}
}
- if slices.Contains(ti.objHolders, index) {
- return nil, nil
- }
return nil, new(apistatus.ObjectNotFound)
}
@@ -238,14 +201,14 @@ func TestProcessObject(t *testing.T) {
cnr.Value.Init()
cnr.Value.SetPlacementPolicy(policy)
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
if id.Equals(addr.Container()) {
return cnr, nil
}
t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container())
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -270,21 +233,17 @@ func TestProcessObject(t *testing.T) {
require.True(t, a.Equals(addr), "unexpected redundant copy callback: a=%v", a)
gotRemoveRedundant = true
}),
- WithReplicator(&testReplicator{
- handleReplicationTask: func(_ context.Context, task replicator.Task, res replicator.TaskResult) {
- require.True(t, task.Addr.Equals(addr), "unexpected replicator task: %+v", task)
- for _, node := range task.Nodes {
- gotReplicateTo = append(gotReplicateTo, int(node.PublicKey()[0]))
- }
- },
- }),
- WithPool(testPool(t)),
+ WithReplicator(replicatorFunc(func(_ context.Context, task replicator.Task, res replicator.TaskResult) {
+ require.True(t, task.Addr.Equals(addr), "unexpected replicator task: %+v", task)
+ for _, node := range task.Nodes {
+ gotReplicateTo = append(gotReplicateTo, int(node.PublicKey()[0]))
+ }
+ })),
)
- addrWithType := objectcore.Info{
+ addrWithType := objectcore.AddressWithType{
Address: addr,
Type: ti.objType,
- ECInfo: ti.ecInfo,
}
err := p.processObject(context.Background(), addrWithType)
@@ -303,10 +262,10 @@ func TestProcessObjectError(t *testing.T) {
cnr := &container.Container{}
cnr.Value.Init()
source := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return nil, new(apistatus.ContainerNotFound)
},
}
@@ -317,10 +276,9 @@ func TestProcessObjectError(t *testing.T) {
p := New(
WithContainerSource(source),
WithBuryFunc(buryFn),
- WithPool(testPool(t)),
)
- addrWithType := objectcore.Info{
+ addrWithType := objectcore.AddressWithType{
Address: addr,
}
@@ -329,7 +287,7 @@ func TestProcessObjectError(t *testing.T) {
func TestIteratorContract(t *testing.T) {
addr := oidtest.Address()
- objs := []objectcore.Info{{
+ objs := []objectcore.AddressWithType{{
Address: addr,
Type: objectSDK.TypeRegular,
}}
@@ -338,6 +296,9 @@ func TestIteratorContract(t *testing.T) {
return nil
}
+ pool, err := ants.NewPool(4)
+ require.NoError(t, err)
+
it := &predefinedIterator{
scenario: []nextResult{
{objs, nil},
@@ -351,10 +312,10 @@ func TestIteratorContract(t *testing.T) {
}
containerSrc := containerSrc{
- get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
+ get: func(id cid.ID) (*container.Container, error) {
return nil, new(apistatus.ContainerNotFound)
},
- deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+ deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
return &container.DelInfo{}, nil
},
}
@@ -363,7 +324,7 @@ func TestIteratorContract(t *testing.T) {
WithKeySpaceIterator(it),
WithContainerSource(containerSrc),
WithBuryFunc(buryFn),
- WithPool(testPool(t)),
+ WithPool(pool),
func(c *cfg) {
c.sleepDuration = time.Millisecond
},
@@ -387,14 +348,8 @@ func TestIteratorContract(t *testing.T) {
}, it.calls)
}
-func testPool(t *testing.T) *ants.Pool {
- pool, err := ants.NewPool(4)
- require.NoError(t, err)
- return pool
-}
-
type nextResult struct {
- objs []objectcore.Info
+ objs []objectcore.AddressWithType
err error
}
@@ -405,7 +360,7 @@ type predefinedIterator struct {
calls []string
}
-func (it *predefinedIterator) Next(ctx context.Context, size uint32) ([]objectcore.Info, error) {
+func (it *predefinedIterator) Next(ctx context.Context, size uint32) ([]objectcore.AddressWithType, error) {
if it.pos == len(it.scenario) {
close(it.finishCh)
<-ctx.Done()
@@ -424,15 +379,18 @@ func (it *predefinedIterator) Rewind() {
// sliceKeySpaceIterator is a KeySpaceIterator backed by a slice.
type sliceKeySpaceIterator struct {
- objs []objectcore.Info
+ objs []objectcore.AddressWithType
cur int
}
-func (it *sliceKeySpaceIterator) Next(_ context.Context, size uint32) ([]objectcore.Info, error) {
+func (it *sliceKeySpaceIterator) Next(_ context.Context, size uint32) ([]objectcore.AddressWithType, error) {
if it.cur >= len(it.objs) {
return nil, engine.ErrEndOfListing
}
- end := min(it.cur+int(size), len(it.objs))
+ end := it.cur + int(size)
+ if end > len(it.objs) {
+ end = len(it.objs)
+ }
ret := it.objs[it.cur:end]
it.cur = end
return ret, nil
@@ -443,22 +401,18 @@ func (it *sliceKeySpaceIterator) Rewind() {
}
type containerSrc struct {
- get func(ctx context.Context, id cid.ID) (*container.Container, error)
- deletionInfo func(ctx context.Context, id cid.ID) (*container.DelInfo, error)
+ get func(id cid.ID) (*container.Container, error)
+ deletionInfo func(id cid.ID) (*container.DelInfo, error)
}
-func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
- return f.get(ctx, id)
-}
+func (f containerSrc) Get(id cid.ID) (*container.Container, error) { return f.get(id) }
-func (f containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
- return f.deletionInfo(ctx, id)
-}
+func (f containerSrc) DeletionInfo(id cid.ID) (*container.DelInfo, error) { return f.deletionInfo(id) }
// placementBuilderFunc is a placement.Builder backed by a function
type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
-func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (f placementBuilderFunc) BuildPlacement(c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return f(c, o, p)
}
@@ -467,20 +421,9 @@ type announcedKeysFunc func([]byte) bool
func (f announcedKeysFunc) IsLocalKey(k []byte) bool { return f(k) }
-type testReplicator struct {
- handleReplicationTask func(ctx context.Context, task replicator.Task, res replicator.TaskResult)
- handleLocalPutTask func(ctx context.Context, task replicator.Task)
- handlePullTask func(ctx context.Context, task replicator.Task)
-}
+// replicatorFunc is a Replicator backed by a function.
+type replicatorFunc func(context.Context, replicator.Task, replicator.TaskResult)
-func (r *testReplicator) HandleReplicationTask(ctx context.Context, task replicator.Task, res replicator.TaskResult) {
- r.handleReplicationTask(ctx, task, res)
-}
-
-func (r *testReplicator) HandleLocalPutTask(ctx context.Context, task replicator.Task) {
- r.handleLocalPutTask(ctx, task)
-}
-
-func (r *testReplicator) HandlePullTask(ctx context.Context, task replicator.Task) {
- r.handlePullTask(ctx, task)
+func (f replicatorFunc) HandleTask(ctx context.Context, task replicator.Task, res replicator.TaskResult) {
+ f(ctx, task, res)
}
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index 635a5683b..2e8fe9295 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -3,28 +3,22 @@ package policer
import (
"context"
"errors"
- "sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
func (p *Policer) Run(ctx context.Context) {
p.shardPolicyWorker(ctx)
- p.log.Info(ctx, logs.PolicerRoutineStopped)
+ p.log.Info(logs.PolicerRoutineStopped)
}
func (p *Policer) shardPolicyWorker(ctx context.Context) {
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String())
for {
select {
case <-ctx.Done():
- p.taskPool.Release()
return
default:
}
@@ -36,14 +30,12 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit
continue
}
- p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
+ p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
}
- skipMap := newSkipMap()
for i := range addrs {
select {
case <-ctx.Done():
- p.taskPool.Release()
return
default:
addr := addrs[i]
@@ -61,53 +53,19 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
if p.objsInWork.add(addr.Address) {
err := p.processObject(ctx, addr)
- if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) {
- p.log.Error(ctx, logs.PolicerUnableToProcessObj,
+ if err != nil {
+ p.log.Error(logs.PolicerUnableToProcessObj,
zap.Stringer("object", addr.Address),
- zap.Error(err))
+ zap.String("error", err.Error()))
}
p.cache.Add(addr.Address, time.Now())
p.objsInWork.remove(addr.Address)
- p.metrics.IncProcessedObjects()
}
})
if err != nil {
- p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err))
+ p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err))
}
}
}
}
}
-
-type errMap struct {
- sync.Mutex
- skipMap map[cid.ID][]error
-}
-
-func newSkipMap() *errMap {
- return &errMap{
- skipMap: make(map[cid.ID][]error),
- }
-}
-
-// addSeenError marks err as seen error for the container.
-// Returns true is the error has already been added.
-func (m *errMap) addSeenError(cnr cid.ID, err error) bool {
- m.Lock()
- defer m.Unlock()
-
- for _, e := range m.skipMap[cnr] {
- if errors.Is(err, e) {
- return true
- }
- }
-
- // Restrict list length to avoid possible OOM if some random error is added in future.
- const maxErrListLength = 10
-
- lst := m.skipMap[cnr]
- if len(lst) < maxErrListLength {
- m.skipMap[cnr] = append(lst, err)
- }
- return false
-}
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 8c6f0df06..3a46e5f04 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -5,7 +5,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.opentelemetry.io/otel/attribute"
@@ -20,18 +21,18 @@ type TaskResult interface {
SubmitSuccessfulReplication(netmap.NodeInfo)
}
-// HandleReplicationTask executes replication task inside invoking goroutine.
+// HandleTask executes replication task inside invoking goroutine.
// Passes all the nodes that accepted the replication to the TaskResult.
-func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res TaskResult) {
+func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult) {
p.metrics.IncInFlightRequest()
defer p.metrics.DecInFlightRequest()
defer func() {
- p.log.Debug(ctx, logs.ReplicatorFinishWork,
+ p.log.Debug(logs.ReplicatorFinishWork,
zap.Uint32("amount of unfinished replicas", task.NumCopies),
)
}()
- ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleReplicateTask",
+ ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleTask",
trace.WithAttributes(
attribute.Stringer("address", task.Addr),
attribute.Int64("number_of_copies", int64(task.NumCopies)),
@@ -42,15 +43,16 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
var err error
task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr)
if err != nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage,
+ p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage,
zap.Stringer("object", task.Addr),
- zap.Error(err))
+ zap.Error(err),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return
}
}
- prm := new(objectwriter.RemotePutPrm).
+ prm := new(putsvc.RemotePutPrm).
WithObject(task.Obj)
for i := 0; task.NumCopies > 0 && i < len(task.Nodes); i++ {
@@ -63,6 +65,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
log := p.log.With(
zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])),
zap.Stringer("object", task.Addr),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
callCtx, cancel := context.WithTimeout(ctx, p.putTimeout)
@@ -72,11 +75,11 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
cancel()
if err != nil {
- log.Error(ctx, logs.ReplicatorCouldNotReplicateObject,
- zap.Error(err),
+ log.Error(logs.ReplicatorCouldNotReplicateObject,
+ zap.String("error", err.Error()),
)
} else {
- log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated)
+ log.Debug(logs.ReplicatorObjectSuccessfullyReplicated)
task.NumCopies--
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
deleted file mode 100644
index 216fe4919..000000000
--- a/pkg/services/replicator/pull.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package replicator
-
-import (
- "context"
- "errors"
- "slices"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
-)
-
-var errFailedToGetObjectFromAnyNode = errors.New("failed to get object from any node")
-
-func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
- p.metrics.IncInFlightRequest()
- defer p.metrics.DecInFlightRequest()
- defer func() {
- p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
- }()
-
- ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandlePullTask",
- trace.WithAttributes(
- attribute.Stringer("address", task.Addr),
- attribute.Int("nodes_count", len(task.Nodes)),
- ))
- defer span.End()
-
- var obj *objectSDK.Object
-
- for _, node := range task.Nodes {
- var err error
- obj, err = p.remoteGetter.Get(ctx, getsvc.RemoteGetPrm{
- Address: task.Addr,
- Node: node,
- })
- if err == nil {
- break
- }
- endpoints := slices.Collect(node.NetworkEndpoints())
- p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
- zap.Stringer("object", task.Addr),
- zap.Error(err),
- zap.Strings("endpoints", endpoints))
- }
-
- if obj == nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
- zap.Stringer("object", task.Addr),
- zap.Error(errFailedToGetObjectFromAnyNode))
- return
- }
-
- err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container))
- if err != nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
- zap.Stringer("object", task.Addr),
- zap.Error(err))
- }
-}
diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go
deleted file mode 100644
index bcad8471d..000000000
--- a/pkg/services/replicator/put.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package replicator
-
-import (
- "context"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
-)
-
-var errObjectNotDefined = errors.New("object is not defined")
-
-func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
- p.metrics.IncInFlightRequest()
- defer p.metrics.DecInFlightRequest()
- defer func() {
- p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
- }()
-
- ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleLocalPutTask",
- trace.WithAttributes(
- attribute.Stringer("address", task.Addr),
- attribute.Int("nodes_count", len(task.Nodes)),
- ))
- defer span.End()
-
- if task.Obj == nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
- zap.Stringer("object", task.Addr),
- zap.Error(errObjectNotDefined))
- return
- }
-
- err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container))
- if err != nil {
- p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
- zap.Stringer("object", task.Addr),
- zap.Error(err))
- }
-}
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index a940cef37..bb817cb32 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -4,9 +4,9 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
- getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "go.uber.org/zap"
)
// Replicator represents the utility that replicates
@@ -23,9 +23,7 @@ type cfg struct {
log *logger.Logger
- remoteSender *objectwriter.RemoteSender
-
- remoteGetter *getsvc.RemoteGetter
+ remoteSender *putsvc.RemoteSender
localStorage *engine.StorageEngine
@@ -44,6 +42,8 @@ func New(opts ...Option) *Replicator {
opts[i](c)
}
+ c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Replicator"))}
+
return &Replicator{
cfg: c,
}
@@ -64,18 +64,12 @@ func WithLogger(v *logger.Logger) Option {
}
// WithRemoteSender returns option to set remote object sender of Replicator.
-func WithRemoteSender(v *objectwriter.RemoteSender) Option {
+func WithRemoteSender(v *putsvc.RemoteSender) Option {
return func(c *cfg) {
c.remoteSender = v
}
}
-func WithRemoteGetter(v *getsvc.RemoteGetter) Option {
- return func(c *cfg) {
- c.remoteGetter = v
- }
-}
-
// WithLocalStorage returns option to set local object storage of Replicator.
func WithLocalStorage(v *engine.StorageEngine) Option {
return func(c *cfg) {
diff --git a/pkg/services/replicator/task.go b/pkg/services/replicator/task.go
index a03f8dcaa..d2b5b2506 100644
--- a/pkg/services/replicator/task.go
+++ b/pkg/services/replicator/task.go
@@ -1,7 +1,6 @@
package replicator
import (
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -17,6 +16,4 @@ type Task struct {
Obj *objectSDK.Object
// Nodes is a list of potential object holders.
Nodes []netmap.NodeInfo
-
- Container containerSDK.Container
}
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index f0591de71..76c220fab 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -4,10 +4,10 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"go.uber.org/zap"
)
@@ -33,7 +33,10 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log
}
func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
- s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create"))
+ s.log.Debug(logs.ServingRequest,
+ zap.String("component", "SessionService"),
+ zap.String("request", "Create"),
+ )
respBody, err := s.exec.Create(ctx, req.GetBody())
if err != nil {
diff --git a/pkg/services/session/server.go b/pkg/services/session/server.go
index e8555a7c9..9e44ae667 100644
--- a/pkg/services/session/server.go
+++ b/pkg/services/session/server.go
@@ -3,7 +3,7 @@ package session
import (
"context"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
)
// Server is an interface of the FrostFS API Session service server.
diff --git a/pkg/services/session/sign.go b/pkg/services/session/sign.go
index 3664c1403..690fff896 100644
--- a/pkg/services/session/sign.go
+++ b/pkg/services/session/sign.go
@@ -4,8 +4,8 @@ import (
"context"
"crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
)
type signService struct {
diff --git a/pkg/services/session/storage/persistent/executor.go b/pkg/services/session/storage/persistent/executor.go
index ea0233f9a..21f55a7d1 100644
--- a/pkg/services/session/storage/persistent/executor.go
+++ b/pkg/services/session/storage/persistent/executor.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.etcd.io/bbolt"
diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go
index f80ecb591..860b95897 100644
--- a/pkg/services/session/storage/persistent/executor_test.go
+++ b/pkg/services/session/storage/persistent/executor_test.go
@@ -8,8 +8,8 @@ import (
"path/filepath"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
@@ -22,7 +22,7 @@ func TestTokenStore(t *testing.T) {
defer ts.Close()
- owner := usertest.ID()
+ owner := *usertest.ID()
var ownerV2 refs.OwnerID
owner.WriteToV2(&ownerV2)
@@ -39,7 +39,7 @@ func TestTokenStore(t *testing.T) {
tokens := make([]tok, 0, tokenNumber)
- for i := range tokenNumber {
+ for i := 0; i < tokenNumber; i++ {
req.SetExpiration(uint64(i))
res, err := ts.Create(context.Background(), req)
@@ -66,7 +66,7 @@ func TestTokenStore_Persistent(t *testing.T) {
ts, err := NewTokenStore(path)
require.NoError(t, err)
- idOwner := usertest.ID()
+ idOwner := *usertest.ID()
var idOwnerV2 refs.OwnerID
idOwner.WriteToV2(&idOwnerV2)
@@ -127,7 +127,7 @@ func TestTokenStore_RemoveOld(t *testing.T) {
defer ts.Close()
- owner := usertest.ID()
+ owner := *usertest.ID()
var ownerV2 refs.OwnerID
owner.WriteToV2(&ownerV2)
@@ -166,14 +166,14 @@ func TestTokenStore_RemoveOld(t *testing.T) {
//
// If this test is passing, TokenStore works correctly.
func TestBolt_Cursor(t *testing.T) {
- db, err := bbolt.Open(filepath.Join(t.TempDir(), ".storage"), 0o666, nil)
+ db, err := bbolt.Open(filepath.Join(t.TempDir(), ".storage"), 0666, nil)
require.NoError(t, err)
defer db.Close()
cursorKeys := make(map[string]struct{})
- bucketName := []byte("bucket")
+ var bucketName = []byte("bucket")
err = db.Update(func(tx *bbolt.Tx) (err error) {
b, err := tx.CreateBucket(bucketName)
diff --git a/pkg/services/session/storage/persistent/options.go b/pkg/services/session/storage/persistent/options.go
index 60db97f90..411734ea1 100644
--- a/pkg/services/session/storage/persistent/options.go
+++ b/pkg/services/session/storage/persistent/options.go
@@ -19,7 +19,7 @@ type Option func(*cfg)
func defaultCfg() *cfg {
return &cfg{
- l: logger.NewLoggerWrapper(zap.L()),
+ l: &logger.Logger{Logger: zap.L()},
timeout: 100 * time.Millisecond,
}
}
diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go
index 132d62445..e4c2091c0 100644
--- a/pkg/services/session/storage/persistent/storage.go
+++ b/pkg/services/session/storage/persistent/storage.go
@@ -1,7 +1,6 @@
package persistent
import (
- "context"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
@@ -41,7 +40,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
o(cfg)
}
- db, err := bbolt.Open(path, 0o600,
+ db, err := bbolt.Open(path, 0600,
&bbolt.Options{
Timeout: cfg.timeout,
})
@@ -64,7 +63,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
// enable encryption if it
// was configured so
if cfg.privateKey != nil {
- rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8)
+ rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8)
cfg.privateKey.D.FillBytes(rawKey)
c, err := aes.NewCipher(rawKey)
@@ -106,7 +105,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok
return err
})
if err != nil {
- s.l.Error(context.Background(), logs.PersistentCouldNotGetSessionFromPersistentStorage,
+ s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage,
zap.Error(err),
zap.Stringer("ownerID", ownerID),
zap.String("tokenID", hex.EncodeToString(tokenID)),
@@ -131,7 +130,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
if epochFromToken(v) <= epoch {
err = c.Delete()
if err != nil {
- s.l.Error(context.Background(), logs.PersistentCouldNotDeleteSToken,
+ s.l.Error(logs.PersistentCouldNotDeleteSToken,
zap.String("token_id", hex.EncodeToString(k)),
)
}
@@ -142,7 +141,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
})
})
if err != nil {
- s.l.Error(context.Background(), logs.PersistentCouldNotCleanUpExpiredTokens,
+ s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens,
zap.Uint64("epoch", epoch),
)
}
diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go
index 423e579d7..cd498709c 100644
--- a/pkg/services/session/storage/temporary/executor.go
+++ b/pkg/services/session/storage/temporary/executor.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -38,7 +38,7 @@ func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody)
s.mtx.Lock()
s.tokens[key{
tokenID: base58.Encode(uidBytes),
- ownerID: id.EncodeToString(),
+ ownerID: base58.Encode(id.WalletBytes()),
}] = storage.NewPrivateToken(&sk.PrivateKey, body.GetExpiration())
s.mtx.Unlock()
diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go
index c9da6b842..ee93dee71 100644
--- a/pkg/services/session/storage/temporary/storage.go
+++ b/pkg/services/session/storage/temporary/storage.go
@@ -9,9 +9,7 @@ import (
)
type key struct {
- // nolint:unused
tokenID string
- // nolint:unused
ownerID string
}
@@ -41,7 +39,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken
s.mtx.RLock()
t := s.tokens[key{
tokenID: base58.Encode(tokenID),
- ownerID: ownerID.EncodeToString(),
+ ownerID: base58.Encode(ownerID.WalletBytes()),
}]
s.mtx.RUnlock()
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
deleted file mode 100644
index 58757ff6d..000000000
--- a/pkg/services/tree/ape.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package tree
-
-import (
- "context"
- "encoding/hex"
- "fmt"
- "net"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/converter"
- aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "google.golang.org/grpc/peer"
-)
-
-func (s *Service) newAPERequest(ctx context.Context, namespace string,
- cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
-) (aperequest.Request, error) {
- schemaMethod, err := converter.SchemaMethodFromACLOperation(operation)
- if err != nil {
- return aperequest.Request{}, err
- }
- schemaRole, err := converter.SchemaRoleFromACLRole(role)
- if err != nil {
- return aperequest.Request{}, err
- }
- reqProps := map[string]string{
- nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()),
- nativeschema.PropertyKeyActorRole: schemaRole,
- }
- reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey)
- if err != nil {
- return aperequest.Request{}, err
- }
- if p, ok := peer.FromContext(ctx); ok {
- if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
- reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
- }
- }
-
- var resourceName string
- if namespace == "root" || namespace == "" {
- resourceName = fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cid.EncodeToString())
- } else {
- resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString())
- }
-
- resProps := map[string]string{
- nativeschema.ProperyKeyTreeID: treeID,
- }
-
- return aperequest.NewRequest(
- schemaMethod,
- aperequest.NewResource(resourceName, resProps),
- reqProps,
- ), nil
-}
-
-func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
- container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
-) error {
- namespace := ""
- cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns")
- if hasNamespace {
- namespace = cntNamespace
- }
-
- request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey)
- if err != nil {
- return fmt.Errorf("failed to create ape request: %w", err)
- }
-
- return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{
- Request: request,
- Namespace: namespace,
- Container: cid,
- ContainerOwner: container.Value.Owner(),
- PublicKey: publicKey,
- BearerToken: bt,
- })
-}
-
-// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
-func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) {
- if reqProps == nil {
- reqProps = make(map[string]string)
- }
- props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey)
- if err != nil {
- return reqProps, err
- }
- for propertyName, properyValue := range props {
- reqProps[propertyName] = properyValue
- }
- return reqProps, nil
-}
diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go
deleted file mode 100644
index 7b209fd47..000000000
--- a/pkg/services/tree/ape_test.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package tree
-
-import (
- "context"
- "encoding/hex"
- "fmt"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
- core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
- "github.com/stretchr/testify/require"
-)
-
-var (
- containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy"
-
- senderPrivateKey, _ = keys.NewPrivateKey()
-
- senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes())
-
- rootCnr = &core.Container{Value: containerSDK.Container{}}
-)
-
-type frostfsIDProviderMock struct {
- subjects map[util.Uint160]*client.Subject
- subjectsExtended map[util.Uint160]*client.SubjectExtended
-}
-
-func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) {
- v, ok := f.subjects[key]
- if !ok {
- return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
- }
- return v, nil
-}
-
-func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) {
- v, ok := f.subjectsExtended[key]
- if !ok {
- return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
- }
- return v, nil
-}
-
-var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil)
-
-func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock {
- return &frostfsIDProviderMock{
- subjects: map[util.Uint160]*client.Subject{
- scriptHashFromSenderKey(t, senderKey): {
- Namespace: "testnamespace",
- Name: "test",
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- },
- },
- subjectsExtended: map[util.Uint160]*client.SubjectExtended{
- scriptHashFromSenderKey(t, senderKey): {
- Namespace: "testnamespace",
- Name: "test",
- KV: map[string]string{
- "tag-attr1": "value1",
- "tag-attr2": "value2",
- },
- Groups: []*client.Group{
- {
- ID: 1,
- Name: "test",
- Namespace: "testnamespace",
- KV: map[string]string{
- "attr1": "value1",
- "attr2": "value2",
- },
- },
- },
- },
- },
- }
-}
-
-func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
- pk, err := keys.NewPublicKeyFromString(senderKey)
- require.NoError(t, err)
- return pk.GetScriptHash()
-}
-
-type stMock struct{}
-
-func (m *stMock) CurrentEpoch() uint64 {
- return 8
-}
-
-func TestCheckAPE(t *testing.T) {
- cid := cid.ID{}
- _ = cid.DecodeString(containerID)
-
- t.Run("treeID rule", func(t *testing.T) {
- los := inmemory.NewInmemoryLocalStorage()
- mcs := inmemory.NewInmemoryMorphRuleChainStorage()
- fid := newFrostfsIDProviderMock(t)
- s := Service{
- cfg: cfg{
- frostfsidSubjectProvider: fid,
- },
- apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
- }
-
- mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.QuotaLimitReached,
- Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindResource,
- Key: nativeschema.ProperyKeyTreeID,
- Value: versionTreeID,
- },
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey())
-
- var chErr *checkercore.ChainRouterError
- require.ErrorAs(t, err, &chErr)
- require.Equal(t, chain.QuotaLimitReached, chErr.Status())
- })
-
- t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) {
- los := inmemory.NewInmemoryLocalStorage()
- mcs := inmemory.NewInmemoryMorphRuleChainStorage()
- fid := newFrostfsIDProviderMock(t)
- s := Service{
- cfg: cfg{
- frostfsidSubjectProvider: fid,
- },
- apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
- }
-
- los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- Condition: []chain.Condition{
- {
- Op: chain.CondStringNotEquals,
- Kind: chain.KindResource,
- Key: nativeschema.PropertyKeyObjectType,
- Value: "TOMBSTONE",
- },
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.Allow,
- Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
- require.NoError(t, err)
- })
-
- t.Run("delete rule won't affect tree add", func(t *testing.T) {
- los := inmemory.NewInmemoryLocalStorage()
- mcs := inmemory.NewInmemoryMorphRuleChainStorage()
- fid := newFrostfsIDProviderMock(t)
- s := Service{
- cfg: cfg{
- frostfsidSubjectProvider: fid,
- },
- apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
- }
-
- los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.Allow,
- Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: chain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
- },
- Condition: []chain.Condition{
- {
- Op: chain.CondStringNotEquals,
- Kind: chain.KindResource,
- Key: nativeschema.PropertyKeyObjectType,
- Value: "TOMBSTONE",
- },
- },
- },
- },
- MatchType: chain.MatchTypeFirstMatch,
- })
-
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
- require.NoError(t, err)
- })
-}
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index a11700771..f50aa0b0d 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -2,24 +2,23 @@ package tree
import (
"context"
- "crypto/ecdsa"
"errors"
"fmt"
"sync"
"time"
- internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
+ tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"github.com/hashicorp/golang-lru/v2/simplelru"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/credentials/insecure"
)
type clientCache struct {
sync.Mutex
simplelru.LRU[string, cacheItem]
- key *ecdsa.PrivateKey
- ds *internalNet.DialerSource
}
type cacheItem struct {
@@ -35,20 +34,18 @@ const (
var errRecentlyFailed = errors.New("client has recently failed")
-func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
- l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) {
+func (c *clientCache) init() {
+ l, _ := simplelru.NewLRU[string, cacheItem](defaultClientCacheSize, func(_ string, value cacheItem) {
if conn := value.cc; conn != nil {
_ = conn.Close()
}
})
c.LRU = *l
- c.key = pk
- c.ds = ds
}
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
c.Lock()
- ccInt, ok := c.Get(netmapAddr)
+ ccInt, ok := c.LRU.Get(netmapAddr)
c.Unlock()
if ok {
@@ -66,19 +63,14 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
}
}
- var netAddr network.Address
- if err := netAddr.FromString(netmapAddr); err != nil {
- return nil, err
- }
-
- cc, err := dialTreeService(ctx, netAddr, c.key, c.ds)
+ cc, err := dialTreeService(ctx, netmapAddr)
lastTry := time.Now()
c.Lock()
if err != nil {
- c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
+ c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
} else {
- c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
+ c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
}
c.Unlock()
@@ -88,3 +80,32 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
return NewTreeServiceClient(cc), nil
}
+
+func dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) {
+ var netAddr network.Address
+ if err := netAddr.FromString(netmapAddr); err != nil {
+ return nil, err
+ }
+
+ opts := []grpc.DialOption{
+ grpc.WithBlock(),
+ grpc.WithChainUnaryInterceptor(
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ metrics.NewStreamClientInterceptor(),
+ tracing.NewStreamClientInterceptor(),
+ ),
+ }
+
+ if !netAddr.IsTLSEnabled() {
+ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
+ cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...)
+ cancel()
+
+ return cc, err
+}
diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go
index c641a21a2..435257550 100644
--- a/pkg/services/tree/container.go
+++ b/pkg/services/tree/container.go
@@ -2,7 +2,6 @@ package tree
import (
"bytes"
- "context"
"crypto/sha256"
"fmt"
"sync"
@@ -33,13 +32,13 @@ type containerCacheItem struct {
const defaultContainerCacheSize = 10
// getContainerNodes returns nodes in the container and a position of local key in the list.
-func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
- nm, err := s.nmSource.GetNetMap(ctx, 0)
+func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) {
+ nm, err := s.nmSource.GetNetMap(0)
if err != nil {
return nil, -1, fmt.Errorf("can't get netmap: %w", err)
}
- cnr, err := s.cnrSource.Get(ctx, cid)
+ cnr, err := s.cnrSource.Get(cid)
if err != nil {
return nil, -1, fmt.Errorf("can't get container: %w", err)
}
diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go
index e7a13827e..88a5b5e06 100644
--- a/pkg/services/tree/getsubtree_test.go
+++ b/pkg/services/tree/getsubtree_test.go
@@ -4,8 +4,7 @@ import (
"context"
"errors"
"path"
- "path/filepath"
- "slices"
+ "sort"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -34,8 +33,7 @@ func TestGetSubTree(t *testing.T) {
for i := range tree {
path := tree[i].path
meta := []pilorama.KeyValue{
- {Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])},
- }
+ {Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])}}
lm, err := p.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta)
require.NoError(t, err)
@@ -48,7 +46,7 @@ func TestGetSubTree(t *testing.T) {
acc := subTreeAcc{errIndex: errIndex}
err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{
TreeId: treeID,
- RootId: []uint64{rootID},
+ RootId: rootID,
Depth: depth,
}, p)
if errIndex == -1 {
@@ -58,12 +56,12 @@ func TestGetSubTree(t *testing.T) {
}
// GetSubTree must return child only after is has returned the parent.
- require.Equal(t, rootID, acc.seen[0].Body.NodeId[0])
+ require.Equal(t, rootID, acc.seen[0].Body.NodeId)
loop:
for i := 1; i < len(acc.seen); i++ {
parent := acc.seen[i].Body.ParentId
- for j := range i {
- if acc.seen[j].Body.NodeId[0] == parent[0] {
+ for j := 0; j < i; j++ {
+ if acc.seen[j].Body.NodeId == parent {
continue loop
}
}
@@ -73,16 +71,16 @@ func TestGetSubTree(t *testing.T) {
// GetSubTree must return valid meta.
for i := range acc.seen {
b := acc.seen[i].Body
- meta, node, err := p.TreeGetMeta(context.Background(), d.CID, treeID, b.NodeId[0])
+ meta, node, err := p.TreeGetMeta(context.Background(), d.CID, treeID, b.NodeId)
require.NoError(t, err)
- require.Equal(t, node, b.ParentId[0])
- require.Equal(t, meta.Time, b.Timestamp[0])
+ require.Equal(t, node, b.ParentId)
+ require.Equal(t, meta.Time, b.Timestamp)
require.Equal(t, metaToProto(meta.Items), b.Meta)
}
ordered := make([]uint64, len(acc.seen))
for i := range acc.seen {
- ordered[i] = acc.seen[i].Body.NodeId[0]
+ ordered[i] = acc.seen[i].Body.NodeId
}
return ordered
}
@@ -124,21 +122,9 @@ func TestGetSubTree(t *testing.T) {
}
func TestGetSubTreeOrderAsc(t *testing.T) {
- t.Run("memory forest", func(t *testing.T) {
- testGetSubTreeOrderAsc(t, pilorama.NewMemoryForest())
- })
-
- t.Run("boltdb forest", func(t *testing.T) {
- p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")))
- require.NoError(t, p.Open(context.Background(), 0o644))
- require.NoError(t, p.Init(context.Background()))
- testGetSubTreeOrderAsc(t, p)
- })
-}
-
-func testGetSubTreeOrderAsc(t *testing.T, p pilorama.ForestStorage) {
d := pilorama.CIDDescriptor{CID: cidtest.ID(), Size: 1}
treeID := "sometree"
+ p := pilorama.NewMemoryForest()
tree := []struct {
path []string
@@ -155,8 +141,7 @@ func testGetSubTreeOrderAsc(t *testing.T, p pilorama.ForestStorage) {
for i := range tree {
path := tree[i].path
meta := []pilorama.KeyValue{
- {Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])},
- }
+ {Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])}}
lm, err := p.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta)
require.NoError(t, err)
@@ -164,70 +149,40 @@ func testGetSubTreeOrderAsc(t *testing.T, p pilorama.ForestStorage) {
tree[i].id = lm[0].Child
}
- t.Run("total", func(t *testing.T) {
- t.Skip()
- acc := subTreeAcc{errIndex: -1}
- err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{
- TreeId: treeID,
- OrderBy: &GetSubTreeRequest_Body_Order{
- Direction: GetSubTreeRequest_Body_Order_Asc,
- },
- }, p)
- require.NoError(t, err)
- // GetSubTree must return child only after is has returned the parent.
- require.Equal(t, uint64(0), acc.seen[0].Body.NodeId)
+ acc := subTreeAcc{errIndex: -1}
+ err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{
+ TreeId: treeID,
+ OrderBy: &GetSubTreeRequest_Body_Order{
+ Direction: GetSubTreeRequest_Body_Order_Asc,
+ },
+ }, p)
+ require.NoError(t, err)
+ // GetSubTree must return child only after it has returned the parent.
+ require.Equal(t, uint64(0), acc.seen[0].Body.NodeId)
- paths := make([]string, 0, len(acc.seen))
- for i := range acc.seen {
- if i == 0 {
- continue
- }
- found := false
- for j := range tree {
- if acc.seen[i].Body.NodeId[0] == tree[j].id {
- found = true
- paths = append(paths, path.Join(tree[j].path...))
- }
- }
- require.True(t, found, "unknown node %d %v", i, acc.seen[i].GetBody().GetNodeId())
+ paths := make([]string, 0, len(acc.seen))
+ for i := range acc.seen {
+ if i == 0 {
+ continue
}
+ found := false
+ for j := range tree {
+ if acc.seen[i].Body.NodeId == tree[j].id {
+ found = true
+ paths = append(paths, path.Join(tree[j].path...))
+ }
+ }
+ require.True(t, found, "unknown node")
+ }
- require.True(t, slices.IsSorted(paths))
- })
- t.Run("depth=1", func(t *testing.T) {
- acc := subTreeAcc{errIndex: -1}
- err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{
- TreeId: treeID,
- Depth: 1,
- OrderBy: &GetSubTreeRequest_Body_Order{
- Direction: GetSubTreeRequest_Body_Order_Asc,
- },
- }, p)
- require.NoError(t, err)
- require.Len(t, acc.seen, 1)
- require.Equal(t, uint64(0), acc.seen[0].Body.NodeId[0])
- })
- t.Run("depth=2", func(t *testing.T) {
- acc := subTreeAcc{errIndex: -1}
- err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{
- TreeId: treeID,
- Depth: 2,
- OrderBy: &GetSubTreeRequest_Body_Order{
- Direction: GetSubTreeRequest_Body_Order_Asc,
- },
- }, p)
- require.NoError(t, err)
- require.Len(t, acc.seen, 3)
- require.Equal(t, uint64(0), acc.seen[0].Body.NodeId[0])
- require.Equal(t, uint64(0), acc.seen[1].GetBody().GetParentId()[0])
- require.Equal(t, uint64(0), acc.seen[2].GetBody().GetParentId()[0])
- })
+ require.True(t, sort.SliceIsSorted(paths, func(i, j int) bool {
+ return paths[i] < paths[j]
+ }))
}
var (
errSubTreeSend = errors.New("send finished with error")
errSubTreeSendAfterError = errors.New("send was invoked after an error occurred")
- errInvalidResponse = errors.New("send got invalid response")
)
type subTreeAcc struct {
@@ -240,16 +195,6 @@ type subTreeAcc struct {
var _ TreeService_GetSubTreeServer = &subTreeAcc{}
func (s *subTreeAcc) Send(r *GetSubTreeResponse) error {
- b := r.GetBody()
- if len(b.GetNodeId()) > 1 {
- return errInvalidResponse
- }
- if len(b.GetParentId()) > 1 {
- return errInvalidResponse
- }
- if len(b.GetTimestamp()) > 1 {
- return errInvalidResponse
- }
s.seen = append(s.seen, r)
if s.errIndex >= 0 {
if len(s.seen) == s.errIndex+1 {
diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go
index 07503f8c3..0f0e4ee57 100644
--- a/pkg/services/tree/metrics.go
+++ b/pkg/services/tree/metrics.go
@@ -6,7 +6,6 @@ type MetricsRegister interface {
AddReplicateTaskDuration(time.Duration, bool)
AddReplicateWaitDuration(time.Duration, bool)
AddSyncDuration(time.Duration, bool)
- AddOperation(string, string)
}
type defaultMetricsRegister struct{}
@@ -14,4 +13,3 @@ type defaultMetricsRegister struct{}
func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {}
func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {}
func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {}
-func (defaultMetricsRegister) AddOperation(string, string) {}
diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go
index 56cbcc081..043e12cb2 100644
--- a/pkg/services/tree/options.go
+++ b/pkg/services/tree/options.go
@@ -1,55 +1,44 @@
package tree
import (
- "context"
"crypto/ecdsa"
- "sync/atomic"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
type ContainerSource interface {
container.Source
- DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error)
+ DeletionInfo(cid.ID) (*container.DelInfo, error)
// List must return list of all the containers in the FrostFS network
// at the moment of a call and any error that does not allow fetching
// container information.
- List(ctx context.Context) ([]cid.ID, error)
+ List() ([]cid.ID, error)
}
type cfg struct {
- log *logger.Logger
- key *ecdsa.PrivateKey
- rawPub []byte
- state netmap.State
- nmSource netmap.Source
- cnrSource ContainerSource
- frostfsidSubjectProvider frostfsidcore.SubjectProvider
- forest pilorama.Forest
+ log *logger.Logger
+ key *ecdsa.PrivateKey
+ rawPub []byte
+ nmSource netmap.Source
+ cnrSource ContainerSource
+ eaclSource container.EACLSource
+ forest pilorama.Forest
// replication-related parameters
replicatorChannelCapacity int
replicatorWorkerCount int
replicatorTimeout time.Duration
containerCacheSize int
- authorizedKeys atomic.Pointer[[][]byte]
- syncBatchSize int
-
- localOverrideStorage policyengine.LocalOverrideStorage
- morphChainStorage policyengine.MorphRuleChainStorageReader
+ authorizedKeys [][]byte
metrics MetricsRegister
- ds *net.DialerSource
}
// Option represents configuration option for a tree service.
@@ -63,9 +52,11 @@ func WithContainerSource(src ContainerSource) Option {
}
}
-func WithFrostfsidSubjectProvider(provider frostfsidcore.SubjectProvider) Option {
+// WithEACLSource sets a eACL table source for a tree service.
+// This option is required.
+func WithEACLSource(src container.EACLSource) Option {
return func(c *cfg) {
- c.frostfsidSubjectProvider = provider
+ c.eaclSource = src
}
}
@@ -116,12 +107,6 @@ func WithReplicationWorkerCount(n int) Option {
}
}
-func WithSyncBatchSize(n int) Option {
- return func(c *cfg) {
- c.syncBatchSize = n
- }
-}
-
func WithContainerCacheSize(n int) Option {
return func(c *cfg) {
if n > 0 {
@@ -148,30 +133,9 @@ func WithMetrics(v MetricsRegister) Option {
// keys that have rights to use Tree service.
func WithAuthorizedKeys(keys keys.PublicKeys) Option {
return func(c *cfg) {
- c.authorizedKeys.Store(fromPublicKeys(keys))
- }
-}
-
-func WithAPELocalOverrideStorage(localOverrideStorage policyengine.LocalOverrideStorage) Option {
- return func(c *cfg) {
- c.localOverrideStorage = localOverrideStorage
- }
-}
-
-func WithAPEMorphRuleStorage(morphRuleStorage policyengine.MorphRuleChainStorageReader) Option {
- return func(c *cfg) {
- c.morphChainStorage = morphRuleStorage
- }
-}
-
-func WithNetmapState(state netmap.State) Option {
- return func(c *cfg) {
- c.state = state
- }
-}
-
-func WithDialerSource(ds *net.DialerSource) Option {
- return func(c *cfg) {
- c.ds = ds
+ c.authorizedKeys = nil
+ for _, key := range keys {
+ c.authorizedKeys = append(c.authorizedKeys, key.Bytes())
+ }
}
}
diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go
deleted file mode 100644
index 8f21686df..000000000
--- a/pkg/services/tree/qos.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package tree
-
-import (
- "context"
-
- "google.golang.org/grpc"
-)
-
-var _ TreeServiceServer = (*ioTagAdjust)(nil)
-
-type AdjustIOTag interface {
- AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
-}
-
-type ioTagAdjust struct {
- s TreeServiceServer
- a AdjustIOTag
-}
-
-func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer {
- return &ioTagAdjust{
- s: s,
- a: a,
- }
-}
-
-func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Add(ctx, req)
-}
-
-func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.AddByPath(ctx, req)
-}
-
-func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Apply(ctx, req)
-}
-
-func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.GetNodeByPath(ctx, req)
-}
-
-func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
- ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
- return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{
- sender: srv,
- ServerStream: srv,
- ctxF: func() context.Context { return ctx },
- })
-}
-
-func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
- ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
- return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{
- sender: srv,
- ServerStream: srv,
- ctxF: func() context.Context { return ctx },
- })
-}
-
-func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Healthcheck(ctx, req)
-}
-
-func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Move(ctx, req)
-}
-
-func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.Remove(ctx, req)
-}
-
-func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
- ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
- return i.s.TreeList(ctx, req)
-}
-
-type qosSend[T any] interface {
- Send(T) error
-}
-
-type qosServerWrapper[T any] struct {
- grpc.ServerStream
- sender qosSend[T]
- ctxF func() context.Context
-}
-
-func (w *qosServerWrapper[T]) Send(resp T) error {
- return w.sender.Send(resp)
-}
-
-func (w *qosServerWrapper[T]) Context() context.Context {
- return w.ctxF()
-}
diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go
index 647f8cb30..ec41a60d4 100644
--- a/pkg/services/tree/redirect.go
+++ b/pkg/services/tree/redirect.go
@@ -6,32 +6,19 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
- "google.golang.org/grpc"
)
var errNoSuitableNode = errors.New("no node was found to execute the request")
-func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) {
- var resp *Resp
- var outErr error
- err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool {
- resp, outErr = callback(c, fCtx, req)
- return true
- })
- if err != nil {
- return nil, err
- }
- return resp, outErr
-}
-
// forEachNode executes callback for each node in the container until true is returned.
// Returns errNoSuitableNode if there was no successful attempt to dial any node.
-func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error {
+func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error {
for _, n := range cntNodes {
if bytes.Equal(n.PublicKey(), s.rawPub) {
return nil
@@ -41,15 +28,25 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
var called bool
for _, n := range cntNodes {
var stop bool
- for endpoint := range n.NetworkEndpoints() {
- stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool {
- called = true
- return f(fCtx, c)
- })
- if called {
- break
+ n.IterateNetworkEndpoints(func(endpoint string) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
+ trace.WithAttributes(
+ attribute.String("endpoint", endpoint),
+ ))
+ defer span.End()
+
+ c, err := s.cache.get(ctx, endpoint)
+ if err != nil {
+ return false
}
- }
+
+ s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+
+ called = true
+ stop = f(c)
+ return true
+ })
if stop {
return nil
}
@@ -59,19 +56,3 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
}
return nil
}
-
-func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
- trace.WithAttributes(
- attribute.String("endpoint", endpoint),
- ))
- defer span.End()
-
- c, err := s.cache.get(ctx, endpoint)
- if err != nil {
- return false
- }
-
- s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint))
- return f(ctx, c)
-}
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index ee40884eb..49a37b8be 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -39,7 +40,6 @@ const (
defaultReplicatorCapacity = 64
defaultReplicatorWorkerCount = 64
defaultReplicatorSendTimeout = time.Second * 5
- defaultSyncBatchSize = 1000
)
func (s *Service) localReplicationWorker(ctx context.Context) {
@@ -57,8 +57,8 @@ func (s *Service) localReplicationWorker(ctx context.Context) {
err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false)
if err != nil {
- s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation,
- zap.Error(err))
+ s.log.Error(logs.TreeFailedToApplyReplicatedOperation,
+ zap.String("err", err.Error()))
}
span.End()
}
@@ -71,71 +71,63 @@ func (s *Service) replicationWorker(ctx context.Context) {
case <-s.closeCh:
return
case task := <-s.replicationTasks:
- _ = s.ReplicateTreeOp(ctx, task.n, task.req)
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTask",
+ trace.WithAttributes(
+ attribute.String("public_key", hex.EncodeToString(task.n.PublicKey())),
+ ),
+ )
+ start := time.Now()
+
+ var lastErr error
+ var lastAddr string
+
+ task.n.IterateNetworkEndpoints(func(addr string) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
+ trace.WithAttributes(
+ attribute.String("public_key", hex.EncodeToString(task.n.PublicKey())),
+ attribute.String("address", addr),
+ ),
+ )
+ defer span.End()
+
+ lastAddr = addr
+
+ c, err := s.cache.get(ctx, addr)
+ if err != nil {
+ lastErr = fmt.Errorf("can't create client: %w", err)
+ return false
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
+ _, lastErr = c.Apply(ctx, task.req)
+ cancel()
+
+ return lastErr == nil
+ })
+
+ if lastErr != nil {
+ if errors.Is(lastErr, errRecentlyFailed) {
+ s.log.Debug(logs.TreeDoNotSendUpdateToTheNode,
+ zap.String("last_error", lastErr.Error()),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ } else {
+ s.log.Warn(logs.TreeFailedToSentUpdateToTheNode,
+ zap.String("last_error", lastErr.Error()),
+ zap.String("address", lastAddr),
+ zap.String("key", hex.EncodeToString(task.n.PublicKey())),
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ }
+ s.metrics.AddReplicateTaskDuration(time.Since(start), false)
+ } else {
+ s.metrics.AddReplicateTaskDuration(time.Since(start), true)
+ }
+ span.End()
}
}
}
-func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req *ApplyRequest) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTask",
- trace.WithAttributes(
- attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
- ),
- )
- defer span.End()
-
- start := time.Now()
-
- var lastErr error
- var lastAddr string
-
- for addr := range n.NetworkEndpoints() {
- lastAddr = addr
- lastErr = s.apply(ctx, n, addr, req)
- if lastErr == nil {
- break
- }
- }
-
- if lastErr != nil {
- if errors.Is(lastErr, errRecentlyFailed) {
- s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode,
- zap.String("last_error", lastErr.Error()))
- } else {
- s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode,
- zap.String("last_error", lastErr.Error()),
- zap.String("address", lastAddr),
- zap.String("key", hex.EncodeToString(n.PublicKey())))
- }
- s.metrics.AddReplicateTaskDuration(time.Since(start), false)
- return lastErr
- }
- s.metrics.AddReplicateTaskDuration(time.Since(start), true)
- return nil
-}
-
-func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
- trace.WithAttributes(
- attribute.String("public_key", hex.EncodeToString(n.PublicKey())),
- attribute.String("address", addr),
- ),
- )
- defer span.End()
-
- c, err := s.cache.get(ctx, addr)
- if err != nil {
- return fmt.Errorf("can't create client: %w", err)
- }
-
- ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout)
- _, err = c.Apply(ctx, req)
- cancel()
- return err
-}
-
func (s *Service) replicateLoop(ctx context.Context) {
- for range s.replicatorWorkerCount {
+ for i := 0; i < s.replicatorWorkerCount; i++ {
go s.replicationWorker(ctx)
go s.localReplicationWorker(ctx)
}
@@ -153,10 +145,10 @@ func (s *Service) replicateLoop(ctx context.Context) {
return
case op := <-s.replicateCh:
start := time.Now()
- err := s.replicate(ctx, op)
+ err := s.replicate(op)
if err != nil {
- s.log.Error(ctx, logs.TreeErrorDuringReplication,
- zap.Error(err),
+ s.log.Error(logs.TreeErrorDuringReplication,
+ zap.String("err", err.Error()),
zap.Stringer("cid", op.cid),
zap.String("treeID", op.treeID))
}
@@ -165,14 +157,14 @@ func (s *Service) replicateLoop(ctx context.Context) {
}
}
-func (s *Service) replicate(ctx context.Context, op movePair) error {
+func (s *Service) replicate(op movePair) error {
req := newApplyRequest(&op)
err := SignMessage(req, s.key)
if err != nil {
return fmt.Errorf("can't sign data: %w", err)
}
- nodes, localIndex, err := s.getContainerNodes(ctx, op.cid)
+ nodes, localIndex, err := s.getContainerNodes(op.cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -206,7 +198,7 @@ func newApplyRequest(op *movePair) *ApplyRequest {
TreeId: op.treeID,
Operation: &LogMove{
ParentId: op.op.Parent,
- Meta: op.op.Bytes(),
+ Meta: op.op.Meta.Bytes(),
ChildId: op.op.Child,
},
},
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 3994d6973..57767f87e 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -5,23 +5,17 @@ import (
"context"
"errors"
"fmt"
- "slices"
+ "sort"
"sync"
"sync/atomic"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
)
// Service represents tree-service capable of working with multiple
@@ -41,8 +35,6 @@ type Service struct {
initialSyncDone atomic.Bool
- apeChecker checkercore.CheckCore
-
// cnrMap contains existing (used) container IDs.
cnrMap map[cidSDK.ID]struct{}
// cnrMapMtx protects cnrMap
@@ -58,19 +50,17 @@ func New(opts ...Option) *Service {
s.replicatorChannelCapacity = defaultReplicatorCapacity
s.replicatorWorkerCount = defaultReplicatorWorkerCount
s.replicatorTimeout = defaultReplicatorSendTimeout
- s.syncBatchSize = defaultSyncBatchSize
s.metrics = defaultMetricsRegister{}
- s.authorizedKeys.Store(&[][]byte{})
for i := range opts {
opts[i](&s.cfg)
}
if s.log == nil {
- s.log = logger.NewLoggerWrapper(zap.NewNop())
+ s.log = &logger.Logger{Logger: zap.NewNop()}
}
- s.cache.init(s.key, s.ds)
+ s.cache.init()
s.closeCh = make(chan struct{})
s.replicateCh = make(chan movePair, s.replicatorChannelCapacity)
s.replicateLocalCh = make(chan applyOp)
@@ -80,14 +70,11 @@ func New(opts ...Option) *Service {
s.syncChan = make(chan struct{})
s.syncPool, _ = ants.NewPool(defaultSyncWorkerCount)
- s.apeChecker = checkercore.New(s.localOverrideStorage, s.morphChainStorage, s.frostfsidSubjectProvider, s.state)
-
return &s
}
// Start starts the service.
func (s *Service) Start(ctx context.Context) {
- ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String())
go s.replicateLoop(ctx)
go s.syncLoop(ctx)
@@ -107,7 +94,6 @@ func (s *Service) Shutdown() {
}
func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
- defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -119,17 +105,26 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).Add)
+ var resp *AddResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.Add(ctx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
@@ -151,7 +146,6 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
}
func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
- defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -163,17 +157,26 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).AddByPath)
+ var resp *AddByPathResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.AddByPath(ctx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
meta := protoToMeta(b.GetMeta())
@@ -207,7 +210,6 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
}
func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
- defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -219,17 +221,26 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete)
+ err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).Remove)
+ var resp *RemoveResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.Remove(ctx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
if b.GetNodeId() == pilorama.RootID {
@@ -252,7 +263,6 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
// Move applies client operation to the specified tree and pushes in queue
// for replication on other nodes.
func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
- defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -264,17 +274,26 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut)
+ err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectPut)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).Move)
+ var resp *MoveResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.Move(ctx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
if b.GetNodeId() == pilorama.RootID {
@@ -296,7 +315,6 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
}
func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
- defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -308,17 +326,26 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
return nil, err
}
- err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).GetNodeByPath)
+ var resp *GetNodeByPathResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.GetNodeByPath(ctx, req)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
attr := b.GetPathAttribute()
@@ -331,7 +358,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
return nil, err
}
- info := make([]GetNodeByPathResponse_Info, 0, len(nodes))
+ info := make([]*GetNodeByPathResponse_Info, 0, len(nodes))
for _, node := range nodes {
m, parent, err := s.forest.TreeGetMeta(ctx, cid, b.GetTreeId(), node)
if err != nil {
@@ -342,21 +369,22 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
x.ParentId = parent
x.NodeId = node
x.Timestamp = m.Time
- if b.GetAllAttributes() {
+ if b.AllAttributes {
x.Meta = metaToProto(m.Items)
} else {
- var metaValue []KeyValue
for _, kv := range m.Items {
- if slices.Contains(b.GetAttributes(), kv.Key) {
- metaValue = append(metaValue, KeyValue{
- Key: kv.Key,
- Value: kv.Value,
- })
+ for _, attr := range b.GetAttributes() {
+ if kv.Key == attr {
+ x.Meta = append(x.Meta, &KeyValue{
+ Key: kv.Key,
+ Value: kv.Value,
+ })
+ break
+ }
}
}
- x.Meta = metaValue
}
- info = append(info, x)
+ info = append(info, &x)
}
return &GetNodeByPathResponse{
@@ -367,7 +395,6 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
}
func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
- defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context()))
if !s.initialSyncDone.Load() {
return ErrAlreadySyncing
}
@@ -379,20 +406,20 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
return err
}
- err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet)
+ err := s.verifyClient(req, cid, b.GetBearerToken(), acl.OpObjectGet)
if err != nil {
return err
}
- ns, pos, err := s.getContainerNodes(srv.Context(), cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetSubTreeClient
var outErr error
- err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
- cli, outErr = c.GetSubTree(fCtx, req)
+ err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
+ cli, outErr = c.GetSubTree(srv.Context(), req)
return true
})
if err != nil {
@@ -411,124 +438,15 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
return getSubTree(srv.Context(), srv, cid, b, s.forest)
}
-type stackItem struct {
- values []pilorama.MultiNodeInfo
- parent pilorama.MultiNode
- last *pilorama.Cursor
-}
-
-func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
- const batchSize = 1000
-
- // For backward compatibility.
- rootIDs := b.GetRootId()
- if len(rootIDs) == 0 {
- rootIDs = []uint64{0}
- }
-
- // Traverse the tree in a DFS manner. Because we need to support arbitrary depth,
- // recursive implementation is not suitable here, so we maintain explicit stack.
- var ms []pilorama.KeyValue
- var ps []uint64
- var ts []uint64
- for _, rootID := range rootIDs {
- m, p, err := forest.TreeGetMeta(ctx, cid, b.GetTreeId(), rootID)
- if err != nil {
- return err
- }
- if ms == nil {
- ms = m.Items
- } else if len(m.Items) != 1 {
- return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided")
- }
- ts = append(ts, m.Time)
- ps = append(ps, p)
- }
-
- stack := []stackItem{{
- values: []pilorama.MultiNodeInfo{{
- Children: rootIDs,
- Timestamps: ts,
- Meta: ms,
- Parents: ps,
- }},
- parent: ps,
- }}
-
- for {
- if len(stack) == 0 {
- break
- } else if item := &stack[len(stack)-1]; len(item.values) == 0 {
- if len(stack) == 1 {
- break
- }
-
- var err error
- item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize)
- if err != nil {
- return err
- }
-
- if len(item.values) == 0 {
- stack = stack[:len(stack)-1]
- continue
- }
- }
-
- node, err := stackPopAndSend(stack, srv)
- if err != nil {
- return err
- }
-
- if b.GetDepth() == 0 || uint32(len(stack)) < b.GetDepth() {
- children, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), node.Children, nil, batchSize)
- if err != nil {
- return err
- }
- if len(children) != 0 {
- stack = append(stack, stackItem{
- values: children,
- parent: node.Children,
- last: last,
- })
- }
- }
- }
- return nil
-}
-
-func stackPopAndSend(stack []stackItem, srv TreeService_GetSubTreeServer) (pilorama.MultiNodeInfo, error) {
- node := stack[len(stack)-1].values[0]
- stack[len(stack)-1].values = stack[len(stack)-1].values[1:]
-
- return node, srv.Send(&GetSubTreeResponse{
- Body: &GetSubTreeResponse_Body{
- NodeId: node.Children,
- ParentId: node.Parents,
- Timestamp: node.Timestamps,
- Meta: metaToProto(node.Meta),
- },
- })
-}
-
func getSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
- if b.GetOrderBy().GetDirection() == GetSubTreeRequest_Body_Order_Asc {
- return getSortedSubTree(ctx, srv, cid, b, forest)
- }
-
- var rootID uint64
- if len(b.GetRootId()) > 0 {
- rootID = b.GetRootId()[0]
- }
-
// Traverse the tree in a DFS manner. Because we need to support arbitrary depth,
// recursive implementation is not suitable here, so we maintain explicit stack.
- m, p, err := forest.TreeGetMeta(ctx, cid, b.GetTreeId(), rootID)
+ m, p, err := forest.TreeGetMeta(ctx, cid, b.GetTreeId(), b.GetRootId())
if err != nil {
return err
}
stack := [][]pilorama.NodeInfo{{{
- ID: rootID,
+ ID: b.GetRootId(),
Meta: m,
ParentID: p,
}}}
@@ -546,9 +464,9 @@ func getSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSD
err = srv.Send(&GetSubTreeResponse{
Body: &GetSubTreeResponse_Body{
- NodeId: []uint64{node.ID},
- ParentId: []uint64{node.ParentID},
- Timestamp: []uint64{node.Meta.Time},
+ NodeId: node.ID,
+ ParentId: node.ParentID,
+ Timestamp: node.Meta.Time,
Meta: metaToProto(node.Meta.Items),
},
})
@@ -581,9 +499,10 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di
if len(nodes) == 0 {
return nodes, nil
}
- slices.SortFunc(nodes, func(a, b pilorama.NodeInfo) int {
- return bytes.Compare(a.Meta.GetAttr(pilorama.AttributeFilename), b.Meta.GetAttr(pilorama.AttributeFilename))
- })
+ less := func(i, j int) bool {
+ return bytes.Compare(nodes[i].Meta.GetAttr(pilorama.AttributeFilename), nodes[j].Meta.GetAttr(pilorama.AttributeFilename)) < 0
+ }
+ sort.Slice(nodes, less)
return nodes, nil
default:
return nil, fmt.Errorf("unsupported order direction: %s", d.String())
@@ -591,8 +510,7 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di
}
// Apply locally applies operation from the remote node to the tree.
-func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
- defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx))
+func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) {
err := verifyMessage(req)
if err != nil {
return nil, err
@@ -605,7 +523,7 @@ func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse,
key := req.GetSignature().GetKey()
- _, pos, _, err := s.getContainerInfo(ctx, cid, key)
+ _, pos, _, err := s.getContainerInfo(cid, key)
if err != nil {
return nil, err
}
@@ -636,7 +554,6 @@ func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse,
}
func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
- defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context()))
if !s.initialSyncDone.Load() {
return ErrAlreadySyncing
}
@@ -648,15 +565,15 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
return err
}
- ns, pos, err := s.getContainerNodes(srv.Context(), cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return err
}
if pos < 0 {
var cli TreeService_GetOpLogClient
var outErr error
- err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool {
- cli, outErr = c.GetOpLog(fCtx, req)
+ err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool {
+ cli, outErr = c.GetOpLog(srv.Context(), req)
return true
})
if err != nil {
@@ -687,7 +604,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
Body: &GetOpLogResponse_Body{
Operation: &LogMove{
ParentId: lm.Parent,
- Meta: lm.Bytes(),
+ Meta: lm.Meta.Bytes(),
ChildId: lm.Child,
},
},
@@ -701,7 +618,6 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
}
func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
- defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx))
if !s.initialSyncDone.Load() {
return nil, ErrAlreadySyncing
}
@@ -721,12 +637,21 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
return nil, err
}
- ns, pos, err := s.getContainerNodes(ctx, cid)
+ ns, pos, err := s.getContainerNodes(cid)
if err != nil {
return nil, err
}
if pos < 0 {
- return relayUnary(ctx, s, ns, req, (TreeServiceClient).TreeList)
+ var resp *TreeListResponse
+ var outErr error
+ err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool {
+ resp, outErr = c.TreeList(ctx, req)
+ return outErr == nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return resp, outErr
}
ids, err := s.forest.TreeList(ctx, cid)
@@ -741,19 +666,21 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
}, nil
}
-func protoToMeta(arr []KeyValue) []pilorama.KeyValue {
+func protoToMeta(arr []*KeyValue) []pilorama.KeyValue {
meta := make([]pilorama.KeyValue, len(arr))
for i, kv := range arr {
- meta[i].Key = kv.GetKey()
- meta[i].Value = kv.GetValue()
+ if kv != nil {
+ meta[i].Key = kv.Key
+ meta[i].Value = kv.Value
+ }
}
return meta
}
-func metaToProto(arr []pilorama.KeyValue) []KeyValue {
- meta := make([]KeyValue, len(arr))
+func metaToProto(arr []pilorama.KeyValue) []*KeyValue {
+ meta := make([]*KeyValue, len(arr))
for i, kv := range arr {
- meta[i] = KeyValue{
+ meta[i] = &KeyValue{
Key: kv.Key,
Value: kv.Value,
}
@@ -763,8 +690,8 @@ func metaToProto(arr []pilorama.KeyValue) []KeyValue {
// getContainerInfo returns the list of container nodes, position in the container for the node
// with pub key and total amount of nodes in all replicas.
-func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
- cntNodes, _, err := s.getContainerNodes(ctx, cid)
+func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
+ cntNodes, _, err := s.getContainerNodes(cid)
if err != nil {
return nil, 0, 0, err
}
@@ -784,15 +711,3 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec
return new(HealthcheckResponse), nil
}
-
-func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) {
- s.authorizedKeys.Store(fromPublicKeys(newKeys))
-}
-
-func fromPublicKeys(keys keys.PublicKeys) *[][]byte {
- buff := make([][]byte, len(keys))
- for i, k := range keys {
- buff[i] = k.Bytes()
- }
- return &buff
-}
diff --git a/pkg/services/tree/service.pb.go b/pkg/services/tree/service.pb.go
new file mode 100644
index 000000000..63f3e714a
--- /dev/null
+++ b/pkg/services/tree/service.pb.go
@@ -0,0 +1,3585 @@
+//*
+// Service for working with CRDT tree.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.9
+// source: pkg/services/tree/service.proto
+
+package tree
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type GetSubTreeRequest_Body_Order_Direction int32
+
+const (
+ GetSubTreeRequest_Body_Order_None GetSubTreeRequest_Body_Order_Direction = 0
+ GetSubTreeRequest_Body_Order_Asc GetSubTreeRequest_Body_Order_Direction = 1
+)
+
+// Enum value maps for GetSubTreeRequest_Body_Order_Direction.
+var (
+ GetSubTreeRequest_Body_Order_Direction_name = map[int32]string{
+ 0: "None",
+ 1: "Asc",
+ }
+ GetSubTreeRequest_Body_Order_Direction_value = map[string]int32{
+ "None": 0,
+ "Asc": 1,
+ }
+)
+
+func (x GetSubTreeRequest_Body_Order_Direction) Enum() *GetSubTreeRequest_Body_Order_Direction {
+ p := new(GetSubTreeRequest_Body_Order_Direction)
+ *p = x
+ return p
+}
+
+func (x GetSubTreeRequest_Body_Order_Direction) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GetSubTreeRequest_Body_Order_Direction) Descriptor() protoreflect.EnumDescriptor {
+ return file_pkg_services_tree_service_proto_enumTypes[0].Descriptor()
+}
+
+func (GetSubTreeRequest_Body_Order_Direction) Type() protoreflect.EnumType {
+ return &file_pkg_services_tree_service_proto_enumTypes[0]
+}
+
+func (x GetSubTreeRequest_Body_Order_Direction) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GetSubTreeRequest_Body_Order_Direction.Descriptor instead.
+func (GetSubTreeRequest_Body_Order_Direction) EnumDescriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10, 0, 0, 0}
+}
+
+type AddRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Request body.
+ Body *AddRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Request signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *AddRequest) Reset() {
+ *x = AddRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddRequest) ProtoMessage() {}
+
+func (x *AddRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddRequest.ProtoReflect.Descriptor instead.
+func (*AddRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *AddRequest) GetBody() *AddRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *AddRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type AddResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Response body.
+ Body *AddResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Response signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *AddResponse) Reset() {
+ *x = AddResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddResponse) ProtoMessage() {}
+
+func (x *AddResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddResponse.ProtoReflect.Descriptor instead.
+func (*AddResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *AddResponse) GetBody() *AddResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *AddResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type AddByPathRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Request body.
+ Body *AddByPathRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Request signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *AddByPathRequest) Reset() {
+ *x = AddByPathRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddByPathRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddByPathRequest) ProtoMessage() {}
+
+func (x *AddByPathRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddByPathRequest.ProtoReflect.Descriptor instead.
+func (*AddByPathRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *AddByPathRequest) GetBody() *AddByPathRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *AddByPathRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type AddByPathResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Response body.
+ Body *AddByPathResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Response signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *AddByPathResponse) Reset() {
+ *x = AddByPathResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddByPathResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddByPathResponse) ProtoMessage() {}
+
+func (x *AddByPathResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddByPathResponse.ProtoReflect.Descriptor instead.
+func (*AddByPathResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *AddByPathResponse) GetBody() *AddByPathResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *AddByPathResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type RemoveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Request body.
+ Body *RemoveRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Request signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *RemoveRequest) Reset() {
+ *x = RemoveRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoveRequest) ProtoMessage() {}
+
+func (x *RemoveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoveRequest.ProtoReflect.Descriptor instead.
+func (*RemoveRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *RemoveRequest) GetBody() *RemoveRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *RemoveRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type RemoveResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Response body.
+ Body *RemoveResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Response signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *RemoveResponse) Reset() {
+ *x = RemoveResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoveResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoveResponse) ProtoMessage() {}
+
+func (x *RemoveResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoveResponse.ProtoReflect.Descriptor instead.
+func (*RemoveResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *RemoveResponse) GetBody() *RemoveResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *RemoveResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type MoveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Request body.
+ Body *MoveRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Request signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *MoveRequest) Reset() {
+ *x = MoveRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MoveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MoveRequest) ProtoMessage() {}
+
+func (x *MoveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MoveRequest.ProtoReflect.Descriptor instead.
+func (*MoveRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *MoveRequest) GetBody() *MoveRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *MoveRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type MoveResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Response body.
+ Body *MoveResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Response signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *MoveResponse) Reset() {
+ *x = MoveResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MoveResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MoveResponse) ProtoMessage() {}
+
+func (x *MoveResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MoveResponse.ProtoReflect.Descriptor instead.
+func (*MoveResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *MoveResponse) GetBody() *MoveResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *MoveResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type GetNodeByPathRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Request body.
+ Body *GetNodeByPathRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Request signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *GetNodeByPathRequest) Reset() {
+ *x = GetNodeByPathRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetNodeByPathRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNodeByPathRequest) ProtoMessage() {}
+
+func (x *GetNodeByPathRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNodeByPathRequest.ProtoReflect.Descriptor instead.
+func (*GetNodeByPathRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *GetNodeByPathRequest) GetBody() *GetNodeByPathRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *GetNodeByPathRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type GetNodeByPathResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Response body.
+ Body *GetNodeByPathResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Response signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *GetNodeByPathResponse) Reset() {
+ *x = GetNodeByPathResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetNodeByPathResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNodeByPathResponse) ProtoMessage() {}
+
+func (x *GetNodeByPathResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNodeByPathResponse.ProtoReflect.Descriptor instead.
+func (*GetNodeByPathResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *GetNodeByPathResponse) GetBody() *GetNodeByPathResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *GetNodeByPathResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type GetSubTreeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Request body.
+ Body *GetSubTreeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Request signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *GetSubTreeRequest) Reset() {
+ *x = GetSubTreeRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSubTreeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSubTreeRequest) ProtoMessage() {}
+
+func (x *GetSubTreeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSubTreeRequest.ProtoReflect.Descriptor instead.
+func (*GetSubTreeRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *GetSubTreeRequest) GetBody() *GetSubTreeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *GetSubTreeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type GetSubTreeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Response body.
+ Body *GetSubTreeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Response signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *GetSubTreeResponse) Reset() {
+ *x = GetSubTreeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSubTreeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSubTreeResponse) ProtoMessage() {}
+
+func (x *GetSubTreeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSubTreeResponse.ProtoReflect.Descriptor instead.
+func (*GetSubTreeResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *GetSubTreeResponse) GetBody() *GetSubTreeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *GetSubTreeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type TreeListRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Request body.
+ Body *TreeListRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Request signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *TreeListRequest) Reset() {
+ *x = TreeListRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TreeListRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TreeListRequest) ProtoMessage() {}
+
+func (x *TreeListRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TreeListRequest.ProtoReflect.Descriptor instead.
+func (*TreeListRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *TreeListRequest) GetBody() *TreeListRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *TreeListRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type TreeListResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Response body.
+ Body *TreeListResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *TreeListResponse) Reset() {
+ *x = TreeListResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TreeListResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TreeListResponse) ProtoMessage() {}
+
+func (x *TreeListResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TreeListResponse.ProtoReflect.Descriptor instead.
+func (*TreeListResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *TreeListResponse) GetBody() *TreeListResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *TreeListResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type ApplyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Request body.
+ Body *ApplyRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Request signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *ApplyRequest) Reset() {
+ *x = ApplyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ApplyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApplyRequest) ProtoMessage() {}
+
+func (x *ApplyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ApplyRequest.ProtoReflect.Descriptor instead.
+func (*ApplyRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *ApplyRequest) GetBody() *ApplyRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *ApplyRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type ApplyResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Response body.
+ Body *ApplyResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Response signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *ApplyResponse) Reset() {
+ *x = ApplyResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ApplyResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApplyResponse) ProtoMessage() {}
+
+func (x *ApplyResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ApplyResponse.ProtoReflect.Descriptor instead.
+func (*ApplyResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *ApplyResponse) GetBody() *ApplyResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *ApplyResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type GetOpLogRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Request body.
+ Body *GetOpLogRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Request signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *GetOpLogRequest) Reset() {
+ *x = GetOpLogRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetOpLogRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetOpLogRequest) ProtoMessage() {}
+
+func (x *GetOpLogRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetOpLogRequest.ProtoReflect.Descriptor instead.
+func (*GetOpLogRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *GetOpLogRequest) GetBody() *GetOpLogRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *GetOpLogRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type GetOpLogResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Response body.
+ Body *GetOpLogResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Response signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *GetOpLogResponse) Reset() {
+ *x = GetOpLogResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetOpLogResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetOpLogResponse) ProtoMessage() {}
+
+func (x *GetOpLogResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetOpLogResponse.ProtoReflect.Descriptor instead.
+func (*GetOpLogResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *GetOpLogResponse) GetBody() *GetOpLogResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *GetOpLogResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type HealthcheckResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Response body.
+ Body *HealthcheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Response signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *HealthcheckResponse) Reset() {
+ *x = HealthcheckResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthcheckResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthcheckResponse) ProtoMessage() {}
+
+func (x *HealthcheckResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthcheckResponse.ProtoReflect.Descriptor instead.
+func (*HealthcheckResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *HealthcheckResponse) GetBody() *HealthcheckResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *HealthcheckResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type HealthcheckRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Request body.
+ Body *HealthcheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ // Request signature.
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *HealthcheckRequest) Reset() {
+ *x = HealthcheckRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthcheckRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthcheckRequest) ProtoMessage() {}
+
+func (x *HealthcheckRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthcheckRequest.ProtoReflect.Descriptor instead.
+func (*HealthcheckRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *HealthcheckRequest) GetBody() *HealthcheckRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *HealthcheckRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+type AddRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Container ID in V2 format.
+ ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ // The name of the tree.
+ TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
+ // ID of the parent to attach node to.
+ ParentId uint64 `protobuf:"varint,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
+ // Key-Value pairs with meta information.
+ Meta []*KeyValue `protobuf:"bytes,4,rep,name=meta,proto3" json:"meta,omitempty"`
+ // Bearer token in V2 format.
+ BearerToken []byte `protobuf:"bytes,5,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
+}
+
+func (x *AddRequest_Body) Reset() {
+ *x = AddRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddRequest_Body) ProtoMessage() {}
+
+func (x *AddRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddRequest_Body.ProtoReflect.Descriptor instead.
+func (*AddRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *AddRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+
+func (x *AddRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+
+func (x *AddRequest_Body) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+
+func (x *AddRequest_Body) GetMeta() []*KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+
+func (x *AddRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+
+type AddResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // ID of the created node.
+ NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+}
+
+func (x *AddResponse_Body) Reset() {
+ *x = AddResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddResponse_Body) ProtoMessage() {}
+
+func (x *AddResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddResponse_Body.ProtoReflect.Descriptor instead.
+func (*AddResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *AddResponse_Body) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+
+type AddByPathRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Container ID in V2 format.
+ ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ // The name of the tree.
+ TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
+ // Attribute to build path with. Default: "FileName".
+ PathAttribute string `protobuf:"bytes,3,opt,name=path_attribute,json=pathAttribute,proto3" json:"path_attribute,omitempty"`
+ // List of path components.
+ Path []string `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"`
+ // Node meta-information.
+ Meta []*KeyValue `protobuf:"bytes,5,rep,name=meta,proto3" json:"meta,omitempty"`
+ // Bearer token in V2 format.
+ BearerToken []byte `protobuf:"bytes,6,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
+}
+
+func (x *AddByPathRequest_Body) Reset() {
+ *x = AddByPathRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddByPathRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddByPathRequest_Body) ProtoMessage() {}
+
+func (x *AddByPathRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddByPathRequest_Body.ProtoReflect.Descriptor instead.
+func (*AddByPathRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *AddByPathRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+
+func (x *AddByPathRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+
+func (x *AddByPathRequest_Body) GetPathAttribute() string {
+ if x != nil {
+ return x.PathAttribute
+ }
+ return ""
+}
+
+func (x *AddByPathRequest_Body) GetPath() []string {
+ if x != nil {
+ return x.Path
+ }
+ return nil
+}
+
+func (x *AddByPathRequest_Body) GetMeta() []*KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+
+func (x *AddByPathRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+
+type AddByPathResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // List of all created nodes. The first one is the leaf.
+ Nodes []uint64 `protobuf:"varint,1,rep,packed,name=nodes,proto3" json:"nodes,omitempty"`
+ // ID of the parent node where new nodes were attached.
+ ParentId uint64 `protobuf:"varint,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
+}
+
+func (x *AddByPathResponse_Body) Reset() {
+ *x = AddByPathResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AddByPathResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddByPathResponse_Body) ProtoMessage() {}
+
+func (x *AddByPathResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddByPathResponse_Body.ProtoReflect.Descriptor instead.
+func (*AddByPathResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *AddByPathResponse_Body) GetNodes() []uint64 {
+ if x != nil {
+ return x.Nodes
+ }
+ return nil
+}
+
+func (x *AddByPathResponse_Body) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+
+type RemoveRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Container ID in V2 format.
+ ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ // The name of the tree.
+ TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
+ // ID of the node to remove.
+ NodeId uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ // Bearer token in V2 format.
+ BearerToken []byte `protobuf:"bytes,4,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
+}
+
+func (x *RemoveRequest_Body) Reset() {
+ *x = RemoveRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoveRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoveRequest_Body) ProtoMessage() {}
+
+func (x *RemoveRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoveRequest_Body.ProtoReflect.Descriptor instead.
+func (*RemoveRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *RemoveRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+
+func (x *RemoveRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+
+func (x *RemoveRequest_Body) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+
+func (x *RemoveRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+
+type RemoveResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *RemoveResponse_Body) Reset() {
+ *x = RemoveResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoveResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoveResponse_Body) ProtoMessage() {}
+
+func (x *RemoveResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoveResponse_Body.ProtoReflect.Descriptor instead.
+func (*RemoveResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{5, 0}
+}
+
+type MoveRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // TODO import neo.fs.v2.refs.ContainerID directly.
+ // Container ID in V2 format.
+ ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ // The name of the tree.
+ TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
+ // ID of the new parent.
+ ParentId uint64 `protobuf:"varint,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
+ // ID of the node to move.
+ NodeId uint64 `protobuf:"varint,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ // Node meta-information.
+ Meta []*KeyValue `protobuf:"bytes,5,rep,name=meta,proto3" json:"meta,omitempty"`
+ // Bearer token in V2 format.
+ BearerToken []byte `protobuf:"bytes,6,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
+}
+
+func (x *MoveRequest_Body) Reset() {
+ *x = MoveRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MoveRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MoveRequest_Body) ProtoMessage() {}
+
+func (x *MoveRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MoveRequest_Body.ProtoReflect.Descriptor instead.
+func (*MoveRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *MoveRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+
+func (x *MoveRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+
+func (x *MoveRequest_Body) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+
+func (x *MoveRequest_Body) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+
+func (x *MoveRequest_Body) GetMeta() []*KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+
+func (x *MoveRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+
+type MoveResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *MoveResponse_Body) Reset() {
+ *x = MoveResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MoveResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MoveResponse_Body) ProtoMessage() {}
+
+func (x *MoveResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MoveResponse_Body.ProtoReflect.Descriptor instead.
+func (*MoveResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{7, 0}
+}
+
+type GetNodeByPathRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Container ID in V2 format.
+ ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ // The name of the tree.
+ TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
+ // Attribute to build path with. Default: "FileName".
+ PathAttribute string `protobuf:"bytes,3,opt,name=path_attribute,json=pathAttribute,proto3" json:"path_attribute,omitempty"`
+ // List of path components.
+ Path []string `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"`
+ // List of attributes to include in response.
+ Attributes []string `protobuf:"bytes,5,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // Flag to return only the latest version of node.
+ LatestOnly bool `protobuf:"varint,6,opt,name=latest_only,json=latestOnly,proto3" json:"latest_only,omitempty"`
+ // Flag to return all stored attributes.
+ AllAttributes bool `protobuf:"varint,7,opt,name=all_attributes,json=allAttributes,proto3" json:"all_attributes,omitempty"`
+ // Bearer token in V2 format.
+ BearerToken []byte `protobuf:"bytes,8,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
+}
+
+func (x *GetNodeByPathRequest_Body) Reset() {
+ *x = GetNodeByPathRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetNodeByPathRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNodeByPathRequest_Body) ProtoMessage() {}
+
+func (x *GetNodeByPathRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNodeByPathRequest_Body.ProtoReflect.Descriptor instead.
+func (*GetNodeByPathRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (x *GetNodeByPathRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+
+func (x *GetNodeByPathRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+
+func (x *GetNodeByPathRequest_Body) GetPathAttribute() string {
+ if x != nil {
+ return x.PathAttribute
+ }
+ return ""
+}
+
+func (x *GetNodeByPathRequest_Body) GetPath() []string {
+ if x != nil {
+ return x.Path
+ }
+ return nil
+}
+
+func (x *GetNodeByPathRequest_Body) GetAttributes() []string {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *GetNodeByPathRequest_Body) GetLatestOnly() bool {
+ if x != nil {
+ return x.LatestOnly
+ }
+ return false
+}
+
+func (x *GetNodeByPathRequest_Body) GetAllAttributes() bool {
+ if x != nil {
+ return x.AllAttributes
+ }
+ return false
+}
+
+func (x *GetNodeByPathRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+
+// Information about a single tree node.
+type GetNodeByPathResponse_Info struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Node ID.
+ NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ // Timestamp of the last operation with the node.
+ Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // Node meta-information.
+ Meta []*KeyValue `protobuf:"bytes,3,rep,name=meta,proto3" json:"meta,omitempty"`
+ // Parent ID.
+ ParentId uint64 `protobuf:"varint,4,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
+}
+
+func (x *GetNodeByPathResponse_Info) Reset() {
+ *x = GetNodeByPathResponse_Info{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetNodeByPathResponse_Info) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNodeByPathResponse_Info) ProtoMessage() {}
+
+func (x *GetNodeByPathResponse_Info) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNodeByPathResponse_Info.ProtoReflect.Descriptor instead.
+func (*GetNodeByPathResponse_Info) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9, 0}
+}
+
+func (x *GetNodeByPathResponse_Info) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+
+func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+
+func (x *GetNodeByPathResponse_Info) GetMeta() []*KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+
+func (x *GetNodeByPathResponse_Info) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+
+type GetNodeByPathResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // List of nodes stored by path.
+ Nodes []*GetNodeByPathResponse_Info `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
+}
+
+func (x *GetNodeByPathResponse_Body) Reset() {
+ *x = GetNodeByPathResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetNodeByPathResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNodeByPathResponse_Body) ProtoMessage() {}
+
+func (x *GetNodeByPathResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNodeByPathResponse_Body.ProtoReflect.Descriptor instead.
+func (*GetNodeByPathResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9, 1}
+}
+
+func (x *GetNodeByPathResponse_Body) GetNodes() []*GetNodeByPathResponse_Info {
+ if x != nil {
+ return x.Nodes
+ }
+ return nil
+}
+
+type GetSubTreeRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Container ID in V2 format.
+ ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ // The name of the tree.
+ TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
+ // ID of the root node of a subtree.
+ RootId uint64 `protobuf:"varint,3,opt,name=root_id,json=rootId,proto3" json:"root_id,omitempty"`
+ // Optional depth of the traversal. Zero means return only root.
+ // Maximum depth is 10.
+ Depth uint32 `protobuf:"varint,4,opt,name=depth,proto3" json:"depth,omitempty"`
+ // Bearer token in V2 format.
+ BearerToken []byte `protobuf:"bytes,5,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
+ // Result ordering.
+ OrderBy *GetSubTreeRequest_Body_Order `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+}
+
+func (x *GetSubTreeRequest_Body) Reset() {
+ *x = GetSubTreeRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSubTreeRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSubTreeRequest_Body) ProtoMessage() {}
+
+func (x *GetSubTreeRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSubTreeRequest_Body.ProtoReflect.Descriptor instead.
+func (*GetSubTreeRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10, 0}
+}
+
+func (x *GetSubTreeRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+
+func (x *GetSubTreeRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+
+func (x *GetSubTreeRequest_Body) GetRootId() uint64 {
+ if x != nil {
+ return x.RootId
+ }
+ return 0
+}
+
+func (x *GetSubTreeRequest_Body) GetDepth() uint32 {
+ if x != nil {
+ return x.Depth
+ }
+ return 0
+}
+
+func (x *GetSubTreeRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+
+func (x *GetSubTreeRequest_Body) GetOrderBy() *GetSubTreeRequest_Body_Order {
+ if x != nil {
+ return x.OrderBy
+ }
+ return nil
+}
+
+type GetSubTreeRequest_Body_Order struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Direction GetSubTreeRequest_Body_Order_Direction `protobuf:"varint,1,opt,name=direction,proto3,enum=tree.GetSubTreeRequest_Body_Order_Direction" json:"direction,omitempty"`
+}
+
+func (x *GetSubTreeRequest_Body_Order) Reset() {
+ *x = GetSubTreeRequest_Body_Order{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSubTreeRequest_Body_Order) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSubTreeRequest_Body_Order) ProtoMessage() {}
+
+func (x *GetSubTreeRequest_Body_Order) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSubTreeRequest_Body_Order.ProtoReflect.Descriptor instead.
+func (*GetSubTreeRequest_Body_Order) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10, 0, 0}
+}
+
+func (x *GetSubTreeRequest_Body_Order) GetDirection() GetSubTreeRequest_Body_Order_Direction {
+ if x != nil {
+ return x.Direction
+ }
+ return GetSubTreeRequest_Body_Order_None
+}
+
+type GetSubTreeResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // ID of the node.
+ NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ // ID of the parent.
+ ParentId uint64 `protobuf:"varint,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
+ // Time node was first added to a tree.
+ Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // Node meta-information.
+ Meta []*KeyValue `protobuf:"bytes,4,rep,name=meta,proto3" json:"meta,omitempty"`
+}
+
+func (x *GetSubTreeResponse_Body) Reset() {
+ *x = GetSubTreeResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSubTreeResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSubTreeResponse_Body) ProtoMessage() {}
+
+func (x *GetSubTreeResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSubTreeResponse_Body.ProtoReflect.Descriptor instead.
+func (*GetSubTreeResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{11, 0}
+}
+
+func (x *GetSubTreeResponse_Body) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+
+func (x *GetSubTreeResponse_Body) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+
+func (x *GetSubTreeResponse_Body) GetTimestamp() uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+
+func (x *GetSubTreeResponse_Body) GetMeta() []*KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+
+type TreeListRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Container ID in V2 format.
+ ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+}
+
+func (x *TreeListRequest_Body) Reset() {
+ *x = TreeListRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TreeListRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TreeListRequest_Body) ProtoMessage() {}
+
+func (x *TreeListRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TreeListRequest_Body.ProtoReflect.Descriptor instead.
+func (*TreeListRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{12, 0}
+}
+
+func (x *TreeListRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+
+type TreeListResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Tree IDs.
+ Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"`
+}
+
+func (x *TreeListResponse_Body) Reset() {
+ *x = TreeListResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TreeListResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TreeListResponse_Body) ProtoMessage() {}
+
+func (x *TreeListResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TreeListResponse_Body.ProtoReflect.Descriptor instead.
+func (*TreeListResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{13, 0}
+}
+
+func (x *TreeListResponse_Body) GetIds() []string {
+ if x != nil {
+ return x.Ids
+ }
+ return nil
+}
+
+type ApplyRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Container ID in V2 format.
+ ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ // The name of the tree.
+ TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
+ // Operation to be applied.
+ Operation *LogMove `protobuf:"bytes,3,opt,name=operation,proto3" json:"operation,omitempty"`
+}
+
+func (x *ApplyRequest_Body) Reset() {
+ *x = ApplyRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ApplyRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApplyRequest_Body) ProtoMessage() {}
+
+func (x *ApplyRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[36]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ApplyRequest_Body.ProtoReflect.Descriptor instead.
+func (*ApplyRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{14, 0}
+}
+
+func (x *ApplyRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+
+func (x *ApplyRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+
+func (x *ApplyRequest_Body) GetOperation() *LogMove {
+ if x != nil {
+ return x.Operation
+ }
+ return nil
+}
+
+type ApplyResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ApplyResponse_Body) Reset() {
+ *x = ApplyResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ApplyResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApplyResponse_Body) ProtoMessage() {}
+
+func (x *ApplyResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ApplyResponse_Body.ProtoReflect.Descriptor instead.
+func (*ApplyResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{15, 0}
+}
+
+type GetOpLogRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Container ID in V2 format.
+ ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ // The name of the tree.
+ TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
+ // Starting height to return logs from.
+ Height uint64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
+ // Amount of operations to return.
+ Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (x *GetOpLogRequest_Body) Reset() {
+ *x = GetOpLogRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetOpLogRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetOpLogRequest_Body) ProtoMessage() {}
+
+func (x *GetOpLogRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetOpLogRequest_Body.ProtoReflect.Descriptor instead.
+func (*GetOpLogRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{16, 0}
+}
+
+func (x *GetOpLogRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+
+func (x *GetOpLogRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+
+func (x *GetOpLogRequest_Body) GetHeight() uint64 {
+ if x != nil {
+ return x.Height
+ }
+ return 0
+}
+
+func (x *GetOpLogRequest_Body) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+type GetOpLogResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Operation on a tree.
+ Operation *LogMove `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"`
+}
+
+func (x *GetOpLogResponse_Body) Reset() {
+ *x = GetOpLogResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetOpLogResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetOpLogResponse_Body) ProtoMessage() {}
+
+func (x *GetOpLogResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[39]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetOpLogResponse_Body.ProtoReflect.Descriptor instead.
+func (*GetOpLogResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{17, 0}
+}
+
+func (x *GetOpLogResponse_Body) GetOperation() *LogMove {
+ if x != nil {
+ return x.Operation
+ }
+ return nil
+}
+
+type HealthcheckResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *HealthcheckResponse_Body) Reset() {
+ *x = HealthcheckResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthcheckResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthcheckResponse_Body) ProtoMessage() {}
+
+func (x *HealthcheckResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[40]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthcheckResponse_Body.ProtoReflect.Descriptor instead.
+func (*HealthcheckResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{18, 0}
+}
+
+type HealthcheckRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *HealthcheckRequest_Body) Reset() {
+ *x = HealthcheckRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthcheckRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthcheckRequest_Body) ProtoMessage() {}
+
+func (x *HealthcheckRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_service_proto_msgTypes[41]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthcheckRequest_Body.ProtoReflect.Descriptor instead.
+func (*HealthcheckRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{19, 0}
+}
+
+var File_pkg_services_tree_service_proto protoreflect.FileDescriptor
+
+var file_pkg_services_tree_service_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74,
+ 0x72, 0x65, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x04, 0x74, 0x72, 0x65, 0x65, 0x1a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8f, 0x02, 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
+ 0xa6, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
+ 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74,
+ 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72,
+ 0x65, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69,
+ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49,
+ 0x64, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61,
+ 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x0b, 0x41, 0x64, 0x64,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64,
+ 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
+ 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x1a, 0x1f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6e,
+ 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f,
+ 0x64, 0x65, 0x49, 0x64, 0x22, 0xb9, 0x02, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61,
+ 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41,
+ 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
+ 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09,
+ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xc4, 0x01, 0x0a, 0x04, 0x42, 0x6f,
+ 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+ 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x25,
+ 0x0a, 0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x41, 0x74, 0x74, 0x72,
+ 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74,
+ 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b,
+ 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a,
+ 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x22, 0xaf, 0x01, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x42,
+ 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
+ 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x39, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x05,
+ 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f,
+ 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x49, 0x64, 0x22, 0xec, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e,
+ 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07,
+ 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74,
+ 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x21,
+ 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x22, 0x76, 0x0a, 0x0e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xaa, 0x02, 0x0a, 0x0b, 0x4d, 0x6f,
+ 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x62, 0x6f, 0x64,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d,
+ 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
+ 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
+ 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x1a, 0xbf, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a,
+ 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64,
+ 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12,
+ 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d,
+ 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65,
+ 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x72, 0x0a, 0x0c, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x6f, 0x76, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
+ 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x85, 0x03, 0x0a, 0x14, 0x47,
+ 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65,
+ 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
+ 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x88, 0x02, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
+ 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
+ 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e,
+ 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62,
+ 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69,
+ 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x74, 0x65, 0x73,
+ 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x6c, 0x61,
+ 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x5f,
+ 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12,
+ 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x22, 0xbc, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79,
+ 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04,
+ 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x72, 0x65,
+ 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64,
+ 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65,
+ 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e,
+ 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04,
+ 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69,
+ 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49,
+ 0x64, 0x1a, 0x3e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x6e, 0x6f, 0x64,
+ 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
+ 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65,
+ 0x73, 0x22, 0xbf, 0x03, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42,
+ 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74,
+ 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xc8, 0x02, 0x0a, 0x04, 0x42, 0x6f, 0x64,
+ 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
+ 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a,
+ 0x07, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06,
+ 0x72, 0x6f, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x12, 0x21, 0x0a, 0x0c,
+ 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
+ 0x3d, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x22, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54,
+ 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e,
+ 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x1a, 0x73,
+ 0x0a, 0x05, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x72, 0x65,
+ 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x2e, 0x44,
+ 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x22, 0x1e, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x73,
+ 0x63, 0x10, 0x01, 0x22, 0xf6, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72,
+ 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
+ 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a,
+ 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x7e, 0x0a, 0x04,
+ 0x42, 0x6f, 0x64, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a,
+ 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61,
+ 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65,
+ 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0x9b, 0x01, 0x0a,
+ 0x0f, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x2e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
+ 0x29, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63,
+ 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x8c, 0x01, 0x0a, 0x10, 0x54,
+ 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
+ 0x18, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0xdb, 0x01, 0x0a, 0x0c, 0x41, 0x70,
+ 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
+ 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64,
+ 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65,
+ 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x6f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21,
+ 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49,
+ 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x09, 0x6f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e,
+ 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x09, 0x6f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x74, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x6c, 0x79,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x70,
+ 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79,
+ 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65,
+ 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xe2, 0x01,
+ 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
+ 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x1a, 0x70, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
+ 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74,
+ 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72,
+ 0x65, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x22, 0xa7, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74,
+ 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
+ 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x33, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
+ 0x2b, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x4d, 0x6f, 0x76,
+ 0x65, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x80, 0x01, 0x0a,
+ 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
+ 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22,
+ 0x7e, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
+ 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32,
+ 0xd6, 0x04, 0x0a, 0x0b, 0x54, 0x72, 0x65, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
+ 0x2a, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, 0x10, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64,
+ 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
+ 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x09, 0x41,
+ 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
+ 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74,
+ 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x52, 0x65, 0x6d,
+ 0x6f, 0x76, 0x65, 0x12, 0x13, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
+ 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d,
+ 0x0a, 0x04, 0x4d, 0x6f, 0x76, 0x65, 0x12, 0x11, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x6f,
+ 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x74, 0x72, 0x65, 0x65,
+ 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a,
+ 0x0d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1a,
+ 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50,
+ 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x72, 0x65,
+ 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x75,
+ 0x62, 0x54, 0x72, 0x65, 0x65, 0x12, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18,
+ 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x08, 0x54, 0x72,
+ 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72,
+ 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e,
+ 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x12, 0x12,
+ 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f, 0x70,
+ 0x4c, 0x6f, 0x67, 0x12, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70,
+ 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x74, 0x72, 0x65,
+ 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x12, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e,
+ 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x2e,
+ 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75,
+ 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66,
+ 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_services_tree_service_proto_rawDescOnce sync.Once
+ file_pkg_services_tree_service_proto_rawDescData = file_pkg_services_tree_service_proto_rawDesc
+)
+
+func file_pkg_services_tree_service_proto_rawDescGZIP() []byte {
+ file_pkg_services_tree_service_proto_rawDescOnce.Do(func() {
+ file_pkg_services_tree_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_tree_service_proto_rawDescData)
+ })
+ return file_pkg_services_tree_service_proto_rawDescData
+}
+
+var file_pkg_services_tree_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_pkg_services_tree_service_proto_msgTypes = make([]protoimpl.MessageInfo, 42)
+var file_pkg_services_tree_service_proto_goTypes = []interface{}{
+ (GetSubTreeRequest_Body_Order_Direction)(0), // 0: tree.GetSubTreeRequest.Body.Order.Direction
+ (*AddRequest)(nil), // 1: tree.AddRequest
+ (*AddResponse)(nil), // 2: tree.AddResponse
+ (*AddByPathRequest)(nil), // 3: tree.AddByPathRequest
+ (*AddByPathResponse)(nil), // 4: tree.AddByPathResponse
+ (*RemoveRequest)(nil), // 5: tree.RemoveRequest
+ (*RemoveResponse)(nil), // 6: tree.RemoveResponse
+ (*MoveRequest)(nil), // 7: tree.MoveRequest
+ (*MoveResponse)(nil), // 8: tree.MoveResponse
+ (*GetNodeByPathRequest)(nil), // 9: tree.GetNodeByPathRequest
+ (*GetNodeByPathResponse)(nil), // 10: tree.GetNodeByPathResponse
+ (*GetSubTreeRequest)(nil), // 11: tree.GetSubTreeRequest
+ (*GetSubTreeResponse)(nil), // 12: tree.GetSubTreeResponse
+ (*TreeListRequest)(nil), // 13: tree.TreeListRequest
+ (*TreeListResponse)(nil), // 14: tree.TreeListResponse
+ (*ApplyRequest)(nil), // 15: tree.ApplyRequest
+ (*ApplyResponse)(nil), // 16: tree.ApplyResponse
+ (*GetOpLogRequest)(nil), // 17: tree.GetOpLogRequest
+ (*GetOpLogResponse)(nil), // 18: tree.GetOpLogResponse
+ (*HealthcheckResponse)(nil), // 19: tree.HealthcheckResponse
+ (*HealthcheckRequest)(nil), // 20: tree.HealthcheckRequest
+ (*AddRequest_Body)(nil), // 21: tree.AddRequest.Body
+ (*AddResponse_Body)(nil), // 22: tree.AddResponse.Body
+ (*AddByPathRequest_Body)(nil), // 23: tree.AddByPathRequest.Body
+ (*AddByPathResponse_Body)(nil), // 24: tree.AddByPathResponse.Body
+ (*RemoveRequest_Body)(nil), // 25: tree.RemoveRequest.Body
+ (*RemoveResponse_Body)(nil), // 26: tree.RemoveResponse.Body
+ (*MoveRequest_Body)(nil), // 27: tree.MoveRequest.Body
+ (*MoveResponse_Body)(nil), // 28: tree.MoveResponse.Body
+ (*GetNodeByPathRequest_Body)(nil), // 29: tree.GetNodeByPathRequest.Body
+ (*GetNodeByPathResponse_Info)(nil), // 30: tree.GetNodeByPathResponse.Info
+ (*GetNodeByPathResponse_Body)(nil), // 31: tree.GetNodeByPathResponse.Body
+ (*GetSubTreeRequest_Body)(nil), // 32: tree.GetSubTreeRequest.Body
+ (*GetSubTreeRequest_Body_Order)(nil), // 33: tree.GetSubTreeRequest.Body.Order
+ (*GetSubTreeResponse_Body)(nil), // 34: tree.GetSubTreeResponse.Body
+ (*TreeListRequest_Body)(nil), // 35: tree.TreeListRequest.Body
+ (*TreeListResponse_Body)(nil), // 36: tree.TreeListResponse.Body
+ (*ApplyRequest_Body)(nil), // 37: tree.ApplyRequest.Body
+ (*ApplyResponse_Body)(nil), // 38: tree.ApplyResponse.Body
+ (*GetOpLogRequest_Body)(nil), // 39: tree.GetOpLogRequest.Body
+ (*GetOpLogResponse_Body)(nil), // 40: tree.GetOpLogResponse.Body
+ (*HealthcheckResponse_Body)(nil), // 41: tree.HealthcheckResponse.Body
+ (*HealthcheckRequest_Body)(nil), // 42: tree.HealthcheckRequest.Body
+ (*Signature)(nil), // 43: tree.Signature
+ (*KeyValue)(nil), // 44: tree.KeyValue
+ (*LogMove)(nil), // 45: tree.LogMove
+}
+var file_pkg_services_tree_service_proto_depIdxs = []int32{
+ 21, // 0: tree.AddRequest.body:type_name -> tree.AddRequest.Body
+ 43, // 1: tree.AddRequest.signature:type_name -> tree.Signature
+ 22, // 2: tree.AddResponse.body:type_name -> tree.AddResponse.Body
+ 43, // 3: tree.AddResponse.signature:type_name -> tree.Signature
+ 23, // 4: tree.AddByPathRequest.body:type_name -> tree.AddByPathRequest.Body
+ 43, // 5: tree.AddByPathRequest.signature:type_name -> tree.Signature
+ 24, // 6: tree.AddByPathResponse.body:type_name -> tree.AddByPathResponse.Body
+ 43, // 7: tree.AddByPathResponse.signature:type_name -> tree.Signature
+ 25, // 8: tree.RemoveRequest.body:type_name -> tree.RemoveRequest.Body
+ 43, // 9: tree.RemoveRequest.signature:type_name -> tree.Signature
+ 26, // 10: tree.RemoveResponse.body:type_name -> tree.RemoveResponse.Body
+ 43, // 11: tree.RemoveResponse.signature:type_name -> tree.Signature
+ 27, // 12: tree.MoveRequest.body:type_name -> tree.MoveRequest.Body
+ 43, // 13: tree.MoveRequest.signature:type_name -> tree.Signature
+ 28, // 14: tree.MoveResponse.body:type_name -> tree.MoveResponse.Body
+ 43, // 15: tree.MoveResponse.signature:type_name -> tree.Signature
+ 29, // 16: tree.GetNodeByPathRequest.body:type_name -> tree.GetNodeByPathRequest.Body
+ 43, // 17: tree.GetNodeByPathRequest.signature:type_name -> tree.Signature
+ 31, // 18: tree.GetNodeByPathResponse.body:type_name -> tree.GetNodeByPathResponse.Body
+ 43, // 19: tree.GetNodeByPathResponse.signature:type_name -> tree.Signature
+ 32, // 20: tree.GetSubTreeRequest.body:type_name -> tree.GetSubTreeRequest.Body
+ 43, // 21: tree.GetSubTreeRequest.signature:type_name -> tree.Signature
+ 34, // 22: tree.GetSubTreeResponse.body:type_name -> tree.GetSubTreeResponse.Body
+ 43, // 23: tree.GetSubTreeResponse.signature:type_name -> tree.Signature
+ 35, // 24: tree.TreeListRequest.body:type_name -> tree.TreeListRequest.Body
+ 43, // 25: tree.TreeListRequest.signature:type_name -> tree.Signature
+ 36, // 26: tree.TreeListResponse.body:type_name -> tree.TreeListResponse.Body
+ 43, // 27: tree.TreeListResponse.signature:type_name -> tree.Signature
+ 37, // 28: tree.ApplyRequest.body:type_name -> tree.ApplyRequest.Body
+ 43, // 29: tree.ApplyRequest.signature:type_name -> tree.Signature
+ 38, // 30: tree.ApplyResponse.body:type_name -> tree.ApplyResponse.Body
+ 43, // 31: tree.ApplyResponse.signature:type_name -> tree.Signature
+ 39, // 32: tree.GetOpLogRequest.body:type_name -> tree.GetOpLogRequest.Body
+ 43, // 33: tree.GetOpLogRequest.signature:type_name -> tree.Signature
+ 40, // 34: tree.GetOpLogResponse.body:type_name -> tree.GetOpLogResponse.Body
+ 43, // 35: tree.GetOpLogResponse.signature:type_name -> tree.Signature
+ 41, // 36: tree.HealthcheckResponse.body:type_name -> tree.HealthcheckResponse.Body
+ 43, // 37: tree.HealthcheckResponse.signature:type_name -> tree.Signature
+ 42, // 38: tree.HealthcheckRequest.body:type_name -> tree.HealthcheckRequest.Body
+ 43, // 39: tree.HealthcheckRequest.signature:type_name -> tree.Signature
+ 44, // 40: tree.AddRequest.Body.meta:type_name -> tree.KeyValue
+ 44, // 41: tree.AddByPathRequest.Body.meta:type_name -> tree.KeyValue
+ 44, // 42: tree.MoveRequest.Body.meta:type_name -> tree.KeyValue
+ 44, // 43: tree.GetNodeByPathResponse.Info.meta:type_name -> tree.KeyValue
+ 30, // 44: tree.GetNodeByPathResponse.Body.nodes:type_name -> tree.GetNodeByPathResponse.Info
+ 33, // 45: tree.GetSubTreeRequest.Body.order_by:type_name -> tree.GetSubTreeRequest.Body.Order
+ 0, // 46: tree.GetSubTreeRequest.Body.Order.direction:type_name -> tree.GetSubTreeRequest.Body.Order.Direction
+ 44, // 47: tree.GetSubTreeResponse.Body.meta:type_name -> tree.KeyValue
+ 45, // 48: tree.ApplyRequest.Body.operation:type_name -> tree.LogMove
+ 45, // 49: tree.GetOpLogResponse.Body.operation:type_name -> tree.LogMove
+ 1, // 50: tree.TreeService.Add:input_type -> tree.AddRequest
+ 3, // 51: tree.TreeService.AddByPath:input_type -> tree.AddByPathRequest
+ 5, // 52: tree.TreeService.Remove:input_type -> tree.RemoveRequest
+ 7, // 53: tree.TreeService.Move:input_type -> tree.MoveRequest
+ 9, // 54: tree.TreeService.GetNodeByPath:input_type -> tree.GetNodeByPathRequest
+ 11, // 55: tree.TreeService.GetSubTree:input_type -> tree.GetSubTreeRequest
+ 13, // 56: tree.TreeService.TreeList:input_type -> tree.TreeListRequest
+ 15, // 57: tree.TreeService.Apply:input_type -> tree.ApplyRequest
+ 17, // 58: tree.TreeService.GetOpLog:input_type -> tree.GetOpLogRequest
+ 20, // 59: tree.TreeService.Healthcheck:input_type -> tree.HealthcheckRequest
+ 2, // 60: tree.TreeService.Add:output_type -> tree.AddResponse
+ 4, // 61: tree.TreeService.AddByPath:output_type -> tree.AddByPathResponse
+ 6, // 62: tree.TreeService.Remove:output_type -> tree.RemoveResponse
+ 8, // 63: tree.TreeService.Move:output_type -> tree.MoveResponse
+ 10, // 64: tree.TreeService.GetNodeByPath:output_type -> tree.GetNodeByPathResponse
+ 12, // 65: tree.TreeService.GetSubTree:output_type -> tree.GetSubTreeResponse
+ 14, // 66: tree.TreeService.TreeList:output_type -> tree.TreeListResponse
+ 16, // 67: tree.TreeService.Apply:output_type -> tree.ApplyResponse
+ 18, // 68: tree.TreeService.GetOpLog:output_type -> tree.GetOpLogResponse
+ 19, // 69: tree.TreeService.Healthcheck:output_type -> tree.HealthcheckResponse
+ 60, // [60:70] is the sub-list for method output_type
+ 50, // [50:60] is the sub-list for method input_type
+ 50, // [50:50] is the sub-list for extension type_name
+ 50, // [50:50] is the sub-list for extension extendee
+ 0, // [0:50] is the sub-list for field type_name
+}
+
+func init() { file_pkg_services_tree_service_proto_init() }
+func file_pkg_services_tree_service_proto_init() {
+ if File_pkg_services_tree_service_proto != nil {
+ return
+ }
+ file_pkg_services_tree_types_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_services_tree_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddByPathRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddByPathResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoveRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoveResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MoveRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MoveResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetNodeByPathRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetNodeByPathResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSubTreeRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSubTreeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TreeListRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TreeListResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ApplyRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ApplyResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetOpLogRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetOpLogResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthcheckResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthcheckRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddByPathRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AddByPathResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoveRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoveResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MoveRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MoveResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetNodeByPathRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetNodeByPathResponse_Info); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetNodeByPathResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSubTreeRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSubTreeRequest_Body_Order); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSubTreeResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TreeListRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TreeListResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ApplyRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ApplyResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetOpLogRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetOpLogResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthcheckResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_service_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthcheckRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_services_tree_service_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 42,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_pkg_services_tree_service_proto_goTypes,
+ DependencyIndexes: file_pkg_services_tree_service_proto_depIdxs,
+ EnumInfos: file_pkg_services_tree_service_proto_enumTypes,
+ MessageInfos: file_pkg_services_tree_service_proto_msgTypes,
+ }.Build()
+ File_pkg_services_tree_service_proto = out.File
+ file_pkg_services_tree_service_proto_rawDesc = nil
+ file_pkg_services_tree_service_proto_goTypes = nil
+ file_pkg_services_tree_service_proto_depIdxs = nil
+}
diff --git a/pkg/services/tree/service.proto b/pkg/services/tree/service.proto
index 88bf0bca4..ec63d88ec 100644
--- a/pkg/services/tree/service.proto
+++ b/pkg/services/tree/service.proto
@@ -28,25 +28,25 @@ service TreeService {
// Otherwise, a request is denied.
// Add adds new node to the tree. Invoked by a client.
- rpc Add(AddRequest) returns (AddResponse);
+ rpc Add (AddRequest) returns (AddResponse);
// AddByPath adds new node to the tree by path. Invoked by a client.
- rpc AddByPath(AddByPathRequest) returns (AddByPathResponse);
+ rpc AddByPath (AddByPathRequest) returns (AddByPathResponse);
// Remove removes node from the tree. Invoked by a client.
- rpc Remove(RemoveRequest) returns (RemoveResponse);
+ rpc Remove (RemoveRequest) returns (RemoveResponse);
// Move moves node from one parent to another. Invoked by a client.
- rpc Move(MoveRequest) returns (MoveResponse);
+ rpc Move (MoveRequest) returns (MoveResponse);
// GetNodeByPath returns list of IDs corresponding to a specific filepath.
- rpc GetNodeByPath(GetNodeByPathRequest) returns (GetNodeByPathResponse);
+ rpc GetNodeByPath (GetNodeByPathRequest) returns (GetNodeByPathResponse);
// GetSubTree returns tree corresponding to a specific node.
- rpc GetSubTree(GetSubTreeRequest) returns (stream GetSubTreeResponse);
+ rpc GetSubTree (GetSubTreeRequest) returns (stream GetSubTreeResponse);
// TreeList return list of the existing trees in the container.
- rpc TreeList(TreeListRequest) returns (TreeListResponse);
+ rpc TreeList (TreeListRequest) returns (TreeListResponse);
/* Synchronization API */
// Apply pushes log operation from another node to the current.
// The request must be signed by a container node.
- rpc Apply(ApplyRequest) returns (ApplyResponse);
+ rpc Apply (ApplyRequest) returns (ApplyResponse);
// GetOpLog returns a stream of logged operations starting from some height.
rpc GetOpLog(GetOpLogRequest) returns (stream GetOpLogResponse);
// Healthcheck is a dummy rpc to check service availability
@@ -85,6 +85,7 @@ message AddResponse {
Signature signature = 2;
};
+
message AddByPathRequest {
message Body {
// Container ID in V2 format.
@@ -121,6 +122,7 @@ message AddByPathResponse {
Signature signature = 2;
};
+
message RemoveRequest {
message Body {
// Container ID in V2 format.
@@ -140,7 +142,8 @@ message RemoveRequest {
}
message RemoveResponse {
- message Body {}
+ message Body {
+ }
// Response body.
Body body = 1;
@@ -148,6 +151,7 @@ message RemoveResponse {
Signature signature = 2;
};
+
message MoveRequest {
message Body {
// TODO import neo.fs.v2.refs.ContainerID directly.
@@ -172,7 +176,8 @@ message MoveRequest {
}
message MoveResponse {
- message Body {}
+ message Body {
+ }
// Response body.
Body body = 1;
@@ -180,6 +185,7 @@ message MoveResponse {
Signature signature = 2;
};
+
message GetNodeByPathRequest {
message Body {
// Container ID in V2 format.
@@ -229,6 +235,7 @@ message GetNodeByPathResponse {
Signature signature = 2;
};
+
message GetSubTreeRequest {
message Body {
message Order {
@@ -242,8 +249,8 @@ message GetSubTreeRequest {
bytes container_id = 1;
// The name of the tree.
string tree_id = 2;
- // IDs of the root nodes of a subtree forest.
- repeated uint64 root_id = 3 [ packed = false ];
+ // ID of the root node of a subtree.
+ uint64 root_id = 3;
// Optional depth of the traversal. Zero means return only root.
// Maximum depth is 10.
uint32 depth = 4;
@@ -262,11 +269,11 @@ message GetSubTreeRequest {
message GetSubTreeResponse {
message Body {
// ID of the node.
- repeated uint64 node_id = 1 [ packed = false ];
+ uint64 node_id = 1;
// ID of the parent.
- repeated uint64 parent_id = 2 [ packed = false ];
+ uint64 parent_id = 2;
// Time node was first added to a tree.
- repeated uint64 timestamp = 3 [ packed = false ];
+ uint64 timestamp = 3;
// Node meta-information.
repeated KeyValue meta = 4;
}
@@ -300,6 +307,7 @@ message TreeListResponse {
Signature signature = 2;
}
+
message ApplyRequest {
message Body {
// Container ID in V2 format.
@@ -317,7 +325,8 @@ message ApplyRequest {
}
message ApplyResponse {
- message Body {}
+ message Body {
+ }
// Response body.
Body body = 1;
@@ -325,6 +334,7 @@ message ApplyResponse {
Signature signature = 2;
};
+
message GetOpLogRequest {
message Body {
// Container ID in V2 format.
@@ -356,7 +366,8 @@ message GetOpLogResponse {
};
message HealthcheckResponse {
- message Body {}
+ message Body {
+ }
// Response body.
Body body = 1;
@@ -365,7 +376,8 @@ message HealthcheckResponse {
};
message HealthcheckRequest {
- message Body {}
+ message Body {
+ }
// Request body.
Body body = 1;
diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go
index 88d002621..4b50af2aa 100644
--- a/pkg/services/tree/service_frostfs.pb.go
+++ b/pkg/services/tree/service_frostfs.pb.go
@@ -2,33 +2,7 @@
package tree
-import (
- json "encoding/json"
- fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
- easyproto "github.com/VictoriaMetrics/easyproto"
- jlexer "github.com/mailru/easyjson/jlexer"
- jwriter "github.com/mailru/easyjson/jwriter"
- protowire "google.golang.org/protobuf/encoding/protowire"
- strconv "strconv"
-)
-
-type AddRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- ParentId uint64 `json:"parentId"`
- Meta []KeyValue `json:"meta"`
- BearerToken []byte `json:"bearerToken"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*AddRequest_Body)(nil)
- _ json.Marshaler = (*AddRequest_Body)(nil)
- _ json.Unmarshaler = (*AddRequest_Body)(nil)
-)
+import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
// StableSize returns the size of x in protobuf format.
//
@@ -41,322 +15,38 @@ func (x *AddRequest_Body) StableSize() (size int) {
size += proto.StringSize(2, x.TreeId)
size += proto.UInt64Size(3, x.ParentId)
for i := range x.Meta {
- size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i])
+ size += proto.NestedStructureSize(4, x.Meta[i])
}
size += proto.BytesSize(5, x.BearerToken)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *AddRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *AddRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
- }
- if len(x.TreeId) != 0 {
- mm.AppendString(2, x.TreeId)
- }
- if x.ParentId != 0 {
- mm.AppendUint64(3, x.ParentId)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
+ offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
+ offset += proto.UInt64Marshal(3, buf[offset:], x.ParentId)
for i := range x.Meta {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
- }
- if len(x.BearerToken) != 0 {
- mm.AppendBytes(5, x.BearerToken)
+ offset += proto.NestedStructureMarshal(4, buf[offset:], x.Meta[i])
}
+ offset += proto.BytesMarshal(5, buf[offset:], x.BearerToken)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- case 2: // TreeId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TreeId")
- }
- x.TreeId = data
- case 3: // ParentId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ParentId")
- }
- x.ParentId = data
- case 4: // Meta
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Meta")
- }
- x.Meta = append(x.Meta, KeyValue{})
- ff := &x.Meta[len(x.Meta)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 5: // BearerToken
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
- }
- x.BearerToken = data
- }
- }
- return nil
-}
-func (x *AddRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *AddRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-func (x *AddRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-func (x *AddRequest_Body) SetTreeId(v string) {
- x.TreeId = v
-}
-func (x *AddRequest_Body) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-func (x *AddRequest_Body) SetParentId(v uint64) {
- x.ParentId = v
-}
-func (x *AddRequest_Body) GetMeta() []KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-func (x *AddRequest_Body) SetMeta(v []KeyValue) {
- x.Meta = v
-}
-func (x *AddRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-func (x *AddRequest_Body) SetBearerToken(v []byte) {
- x.BearerToken = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
- out.RawString(prefix)
- out.String(x.TreeId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentId\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Meta {
- if i != 0 {
- out.RawByte(',')
- }
- x.Meta[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
- out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- case "treeId":
- {
- var f string
- f = in.String()
- x.TreeId = f
- }
- case "parentId":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.ParentId = f
- }
- case "meta":
- {
- var f KeyValue
- var list []KeyValue
- in.Delim('[')
- for !in.IsDelim(']') {
- f = KeyValue{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Meta = list
- in.Delim(']')
- }
- case "bearerToken":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.BearerToken = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddRequest struct {
- Body *AddRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*AddRequest)(nil)
- _ json.Marshaler = (*AddRequest)(nil)
- _ json.Unmarshaler = (*AddRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -369,6 +59,27 @@ func (x *AddRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *AddRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -384,175 +95,13 @@ func (x *AddRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *AddRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *AddRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(AddRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *AddRequest) GetBody() *AddRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *AddRequest) SetBody(v *AddRequest_Body) {
- x.Body = v
-}
-func (x *AddRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *AddRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *AddRequest_Body
- f = new(AddRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddResponse_Body struct {
- NodeId uint64 `json:"nodeId"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*AddResponse_Body)(nil)
- _ json.Marshaler = (*AddResponse_Body)(nil)
- _ json.Unmarshaler = (*AddResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -564,141 +113,26 @@ func (x *AddResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *AddResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *AddResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if x.NodeId != 0 {
- mm.AppendUint64(1, x.NodeId)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.UInt64Marshal(1, buf[offset:], x.NodeId)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // NodeId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "NodeId")
- }
- x.NodeId = data
- }
- }
- return nil
-}
-func (x *AddResponse_Body) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-func (x *AddResponse_Body) SetNodeId(v uint64) {
- x.NodeId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodeId\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
- out.RawByte('"')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "nodeId":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.NodeId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddResponse struct {
- Body *AddResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*AddResponse)(nil)
- _ json.Marshaler = (*AddResponse)(nil)
- _ json.Unmarshaler = (*AddResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -711,6 +145,27 @@ func (x *AddResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *AddResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -726,180 +181,13 @@ func (x *AddResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *AddResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *AddResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(AddResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *AddResponse) GetBody() *AddResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *AddResponse) SetBody(v *AddResponse_Body) {
- x.Body = v
-}
-func (x *AddResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *AddResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *AddResponse_Body
- f = new(AddResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddByPathRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- PathAttribute string `json:"pathAttribute"`
- Path []string `json:"path"`
- Meta []KeyValue `json:"meta"`
- BearerToken []byte `json:"bearerToken"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddByPathRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*AddByPathRequest_Body)(nil)
- _ json.Marshaler = (*AddByPathRequest_Body)(nil)
- _ json.Unmarshaler = (*AddByPathRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -912,360 +200,39 @@ func (x *AddByPathRequest_Body) StableSize() (size int) {
size += proto.StringSize(3, x.PathAttribute)
size += proto.RepeatedStringSize(4, x.Path)
for i := range x.Meta {
- size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i])
+ size += proto.NestedStructureSize(5, x.Meta[i])
}
size += proto.BytesSize(6, x.BearerToken)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddByPathRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *AddByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *AddByPathRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
- }
- if len(x.TreeId) != 0 {
- mm.AppendString(2, x.TreeId)
- }
- if len(x.PathAttribute) != 0 {
- mm.AppendString(3, x.PathAttribute)
- }
- for j := range x.Path {
- mm.AppendString(4, x.Path[j])
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
+ offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
+ offset += proto.StringMarshal(3, buf[offset:], x.PathAttribute)
+ offset += proto.RepeatedStringMarshal(4, buf[offset:], x.Path)
for i := range x.Meta {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
- }
- if len(x.BearerToken) != 0 {
- mm.AppendBytes(6, x.BearerToken)
+ offset += proto.NestedStructureMarshal(5, buf[offset:], x.Meta[i])
}
+ offset += proto.BytesMarshal(6, buf[offset:], x.BearerToken)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddByPathRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- case 2: // TreeId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TreeId")
- }
- x.TreeId = data
- case 3: // PathAttribute
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "PathAttribute")
- }
- x.PathAttribute = data
- case 4: // Path
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Path")
- }
- x.Path = append(x.Path, data)
- case 5: // Meta
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Meta")
- }
- x.Meta = append(x.Meta, KeyValue{})
- ff := &x.Meta[len(x.Meta)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 6: // BearerToken
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
- }
- x.BearerToken = data
- }
- }
- return nil
-}
-func (x *AddByPathRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *AddByPathRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-func (x *AddByPathRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-func (x *AddByPathRequest_Body) SetTreeId(v string) {
- x.TreeId = v
-}
-func (x *AddByPathRequest_Body) GetPathAttribute() string {
- if x != nil {
- return x.PathAttribute
- }
- return ""
-}
-func (x *AddByPathRequest_Body) SetPathAttribute(v string) {
- x.PathAttribute = v
-}
-func (x *AddByPathRequest_Body) GetPath() []string {
- if x != nil {
- return x.Path
- }
- return nil
-}
-func (x *AddByPathRequest_Body) SetPath(v []string) {
- x.Path = v
-}
-func (x *AddByPathRequest_Body) GetMeta() []KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-func (x *AddByPathRequest_Body) SetMeta(v []KeyValue) {
- x.Meta = v
-}
-func (x *AddByPathRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-func (x *AddByPathRequest_Body) SetBearerToken(v []byte) {
- x.BearerToken = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddByPathRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
- out.RawString(prefix)
- out.String(x.TreeId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"pathAttribute\":"
- out.RawString(prefix)
- out.String(x.PathAttribute)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"path\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Path {
- if i != 0 {
- out.RawByte(',')
- }
- out.String(x.Path[i])
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Meta {
- if i != 0 {
- out.RawByte(',')
- }
- x.Meta[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
- out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddByPathRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- case "treeId":
- {
- var f string
- f = in.String()
- x.TreeId = f
- }
- case "pathAttribute":
- {
- var f string
- f = in.String()
- x.PathAttribute = f
- }
- case "path":
- {
- var f string
- var list []string
- in.Delim('[')
- for !in.IsDelim(']') {
- f = in.String()
- list = append(list, f)
- in.WantComma()
- }
- x.Path = list
- in.Delim(']')
- }
- case "meta":
- {
- var f KeyValue
- var list []KeyValue
- in.Delim('[')
- for !in.IsDelim(']') {
- f = KeyValue{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Meta = list
- in.Delim(']')
- }
- case "bearerToken":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.BearerToken = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddByPathRequest struct {
- Body *AddByPathRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddByPathRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*AddByPathRequest)(nil)
- _ json.Marshaler = (*AddByPathRequest)(nil)
- _ json.Unmarshaler = (*AddByPathRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1278,6 +245,27 @@ func (x *AddByPathRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *AddByPathRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1293,176 +281,13 @@ func (x *AddByPathRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddByPathRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddByPathRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *AddByPathRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *AddByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddByPathRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddByPathRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(AddByPathRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *AddByPathRequest) GetBody() *AddByPathRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *AddByPathRequest) SetBody(v *AddByPathRequest_Body) {
- x.Body = v
-}
-func (x *AddByPathRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *AddByPathRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddByPathRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddByPathRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddByPathRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *AddByPathRequest_Body
- f = new(AddByPathRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddByPathResponse_Body struct {
- Nodes []uint64 `json:"nodes"`
- ParentId uint64 `json:"parentId"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddByPathResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*AddByPathResponse_Body)(nil)
- _ json.Marshaler = (*AddByPathResponse_Body)(nil)
- _ json.Unmarshaler = (*AddByPathResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1477,199 +302,27 @@ func (x *AddByPathResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddByPathResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *AddByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *AddByPathResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.Nodes) != 0 {
- mm.AppendUint64s(1, x.Nodes)
- }
- if x.ParentId != 0 {
- mm.AppendUint64(2, x.ParentId)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.RepeatedUInt64Marshal(1, buf[offset:], x.Nodes)
+ offset += proto.UInt64Marshal(2, buf[offset:], x.ParentId)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddByPathResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Nodes
- data, ok := fc.UnpackUint64s(nil)
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Nodes")
- }
- x.Nodes = data
- case 2: // ParentId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ParentId")
- }
- x.ParentId = data
- }
- }
- return nil
-}
-func (x *AddByPathResponse_Body) GetNodes() []uint64 {
- if x != nil {
- return x.Nodes
- }
- return nil
-}
-func (x *AddByPathResponse_Body) SetNodes(v []uint64) {
- x.Nodes = v
-}
-func (x *AddByPathResponse_Body) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-func (x *AddByPathResponse_Body) SetParentId(v uint64) {
- x.ParentId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddByPathResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodes\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Nodes {
- if i != 0 {
- out.RawByte(',')
- }
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Nodes[i], 10)
- out.RawByte('"')
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentId\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
- out.RawByte('"')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddByPathResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "nodes":
- {
- var f uint64
- var list []uint64
- in.Delim('[')
- for !in.IsDelim(']') {
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- list = append(list, f)
- in.WantComma()
- }
- x.Nodes = list
- in.Delim(']')
- }
- case "parentId":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.ParentId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type AddByPathResponse struct {
- Body *AddByPathResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*AddByPathResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*AddByPathResponse)(nil)
- _ json.Marshaler = (*AddByPathResponse)(nil)
- _ json.Unmarshaler = (*AddByPathResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1682,6 +335,27 @@ func (x *AddByPathResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *AddByPathResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1697,178 +371,13 @@ func (x *AddByPathResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddByPathResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *AddByPathResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *AddByPathResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *AddByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *AddByPathResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "AddByPathResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(AddByPathResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *AddByPathResponse) GetBody() *AddByPathResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *AddByPathResponse) SetBody(v *AddByPathResponse_Body) {
- x.Body = v
-}
-func (x *AddByPathResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *AddByPathResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *AddByPathResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *AddByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *AddByPathResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *AddByPathResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *AddByPathResponse_Body
- f = new(AddByPathResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- NodeId uint64 `json:"nodeId"`
- BearerToken []byte `json:"bearerToken"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveRequest_Body)(nil)
- _ json.Marshaler = (*RemoveRequest_Body)(nil)
- _ json.Unmarshaler = (*RemoveRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1883,263 +392,29 @@ func (x *RemoveRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *RemoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *RemoveRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
- }
- if len(x.TreeId) != 0 {
- mm.AppendString(2, x.TreeId)
- }
- if x.NodeId != 0 {
- mm.AppendUint64(3, x.NodeId)
- }
- if len(x.BearerToken) != 0 {
- mm.AppendBytes(4, x.BearerToken)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
+ offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
+ offset += proto.UInt64Marshal(3, buf[offset:], x.NodeId)
+ offset += proto.BytesMarshal(4, buf[offset:], x.BearerToken)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- case 2: // TreeId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TreeId")
- }
- x.TreeId = data
- case 3: // NodeId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "NodeId")
- }
- x.NodeId = data
- case 4: // BearerToken
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
- }
- x.BearerToken = data
- }
- }
- return nil
-}
-func (x *RemoveRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *RemoveRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-func (x *RemoveRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-func (x *RemoveRequest_Body) SetTreeId(v string) {
- x.TreeId = v
-}
-func (x *RemoveRequest_Body) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-func (x *RemoveRequest_Body) SetNodeId(v uint64) {
- x.NodeId = v
-}
-func (x *RemoveRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-func (x *RemoveRequest_Body) SetBearerToken(v []byte) {
- x.BearerToken = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
- out.RawString(prefix)
- out.String(x.TreeId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodeId\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
- out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- case "treeId":
- {
- var f string
- f = in.String()
- x.TreeId = f
- }
- case "nodeId":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.NodeId = f
- }
- case "bearerToken":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.BearerToken = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveRequest struct {
- Body *RemoveRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveRequest)(nil)
- _ json.Marshaler = (*RemoveRequest)(nil)
- _ json.Unmarshaler = (*RemoveRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2152,6 +427,27 @@ func (x *RemoveRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *RemoveRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2167,174 +463,13 @@ func (x *RemoveRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *RemoveRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *RemoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(RemoveRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveRequest) GetBody() *RemoveRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *RemoveRequest) SetBody(v *RemoveRequest_Body) {
- x.Body = v
-}
-func (x *RemoveRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *RemoveRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *RemoveRequest_Body
- f = new(RemoveRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveResponse_Body)(nil)
- _ json.Marshaler = (*RemoveResponse_Body)(nil)
- _ json.Unmarshaler = (*RemoveResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2345,93 +480,18 @@ func (x *RemoveResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *RemoveResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *RemoveResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type RemoveResponse struct {
- Body *RemoveResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*RemoveResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*RemoveResponse)(nil)
- _ json.Marshaler = (*RemoveResponse)(nil)
- _ json.Unmarshaler = (*RemoveResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2444,6 +504,27 @@ func (x *RemoveResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *RemoveResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2459,180 +540,13 @@ func (x *RemoveResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *RemoveResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *RemoveResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *RemoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *RemoveResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "RemoveResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(RemoveResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *RemoveResponse) GetBody() *RemoveResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *RemoveResponse) SetBody(v *RemoveResponse_Body) {
- x.Body = v
-}
-func (x *RemoveResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *RemoveResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *RemoveResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *RemoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *RemoveResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *RemoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *RemoveResponse_Body
- f = new(RemoveResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type MoveRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- ParentId uint64 `json:"parentId"`
- NodeId uint64 `json:"nodeId"`
- Meta []KeyValue `json:"meta"`
- BearerToken []byte `json:"bearerToken"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*MoveRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*MoveRequest_Body)(nil)
- _ json.Marshaler = (*MoveRequest_Body)(nil)
- _ json.Unmarshaler = (*MoveRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2645,366 +559,39 @@ func (x *MoveRequest_Body) StableSize() (size int) {
size += proto.UInt64Size(3, x.ParentId)
size += proto.UInt64Size(4, x.NodeId)
for i := range x.Meta {
- size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i])
+ size += proto.NestedStructureSize(5, x.Meta[i])
}
size += proto.BytesSize(6, x.BearerToken)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *MoveRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *MoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *MoveRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
- }
- if len(x.TreeId) != 0 {
- mm.AppendString(2, x.TreeId)
- }
- if x.ParentId != 0 {
- mm.AppendUint64(3, x.ParentId)
- }
- if x.NodeId != 0 {
- mm.AppendUint64(4, x.NodeId)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
+ offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
+ offset += proto.UInt64Marshal(3, buf[offset:], x.ParentId)
+ offset += proto.UInt64Marshal(4, buf[offset:], x.NodeId)
for i := range x.Meta {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
- }
- if len(x.BearerToken) != 0 {
- mm.AppendBytes(6, x.BearerToken)
+ offset += proto.NestedStructureMarshal(5, buf[offset:], x.Meta[i])
}
+ offset += proto.BytesMarshal(6, buf[offset:], x.BearerToken)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *MoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "MoveRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- case 2: // TreeId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TreeId")
- }
- x.TreeId = data
- case 3: // ParentId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ParentId")
- }
- x.ParentId = data
- case 4: // NodeId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "NodeId")
- }
- x.NodeId = data
- case 5: // Meta
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Meta")
- }
- x.Meta = append(x.Meta, KeyValue{})
- ff := &x.Meta[len(x.Meta)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 6: // BearerToken
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
- }
- x.BearerToken = data
- }
- }
- return nil
-}
-func (x *MoveRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *MoveRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-func (x *MoveRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-func (x *MoveRequest_Body) SetTreeId(v string) {
- x.TreeId = v
-}
-func (x *MoveRequest_Body) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-func (x *MoveRequest_Body) SetParentId(v uint64) {
- x.ParentId = v
-}
-func (x *MoveRequest_Body) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-func (x *MoveRequest_Body) SetNodeId(v uint64) {
- x.NodeId = v
-}
-func (x *MoveRequest_Body) GetMeta() []KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-func (x *MoveRequest_Body) SetMeta(v []KeyValue) {
- x.Meta = v
-}
-func (x *MoveRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-func (x *MoveRequest_Body) SetBearerToken(v []byte) {
- x.BearerToken = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *MoveRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
- out.RawString(prefix)
- out.String(x.TreeId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentId\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodeId\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Meta {
- if i != 0 {
- out.RawByte(',')
- }
- x.Meta[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
- out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *MoveRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- case "treeId":
- {
- var f string
- f = in.String()
- x.TreeId = f
- }
- case "parentId":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.ParentId = f
- }
- case "nodeId":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.NodeId = f
- }
- case "meta":
- {
- var f KeyValue
- var list []KeyValue
- in.Delim('[')
- for !in.IsDelim(']') {
- f = KeyValue{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Meta = list
- in.Delim(']')
- }
- case "bearerToken":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.BearerToken = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type MoveRequest struct {
- Body *MoveRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*MoveRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*MoveRequest)(nil)
- _ json.Marshaler = (*MoveRequest)(nil)
- _ json.Unmarshaler = (*MoveRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3017,6 +604,27 @@ func (x *MoveRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *MoveRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3032,174 +640,13 @@ func (x *MoveRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *MoveRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *MoveRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *MoveRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *MoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *MoveRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "MoveRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(MoveRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *MoveRequest) GetBody() *MoveRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *MoveRequest) SetBody(v *MoveRequest_Body) {
- x.Body = v
-}
-func (x *MoveRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *MoveRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *MoveRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *MoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *MoveRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *MoveRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *MoveRequest_Body
- f = new(MoveRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type MoveResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*MoveResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*MoveResponse_Body)(nil)
- _ json.Marshaler = (*MoveResponse_Body)(nil)
- _ json.Unmarshaler = (*MoveResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3210,93 +657,18 @@ func (x *MoveResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *MoveResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *MoveResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *MoveResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *MoveResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "MoveResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *MoveResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *MoveResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *MoveResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *MoveResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type MoveResponse struct {
- Body *MoveResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*MoveResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*MoveResponse)(nil)
- _ json.Marshaler = (*MoveResponse)(nil)
- _ json.Unmarshaler = (*MoveResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3309,6 +681,27 @@ func (x *MoveResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *MoveResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3324,182 +717,13 @@ func (x *MoveResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *MoveResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *MoveResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *MoveResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *MoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *MoveResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "MoveResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(MoveResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *MoveResponse) GetBody() *MoveResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *MoveResponse) SetBody(v *MoveResponse_Body) {
- x.Body = v
-}
-func (x *MoveResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *MoveResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *MoveResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *MoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *MoveResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *MoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *MoveResponse_Body
- f = new(MoveResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetNodeByPathRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- PathAttribute string `json:"pathAttribute"`
- Path []string `json:"path"`
- Attributes []string `json:"attributes"`
- LatestOnly bool `json:"latestOnly"`
- AllAttributes bool `json:"allAttributes"`
- BearerToken []byte `json:"bearerToken"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetNodeByPathRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetNodeByPathRequest_Body)(nil)
- _ json.Marshaler = (*GetNodeByPathRequest_Body)(nil)
- _ json.Unmarshaler = (*GetNodeByPathRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3518,417 +742,33 @@ func (x *GetNodeByPathRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetNodeByPathRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetNodeByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetNodeByPathRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
- }
- if len(x.TreeId) != 0 {
- mm.AppendString(2, x.TreeId)
- }
- if len(x.PathAttribute) != 0 {
- mm.AppendString(3, x.PathAttribute)
- }
- for j := range x.Path {
- mm.AppendString(4, x.Path[j])
- }
- for j := range x.Attributes {
- mm.AppendString(5, x.Attributes[j])
- }
- if x.LatestOnly {
- mm.AppendBool(6, x.LatestOnly)
- }
- if x.AllAttributes {
- mm.AppendBool(7, x.AllAttributes)
- }
- if len(x.BearerToken) != 0 {
- mm.AppendBytes(8, x.BearerToken)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
+ offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
+ offset += proto.StringMarshal(3, buf[offset:], x.PathAttribute)
+ offset += proto.RepeatedStringMarshal(4, buf[offset:], x.Path)
+ offset += proto.RepeatedStringMarshal(5, buf[offset:], x.Attributes)
+ offset += proto.BoolMarshal(6, buf[offset:], x.LatestOnly)
+ offset += proto.BoolMarshal(7, buf[offset:], x.AllAttributes)
+ offset += proto.BytesMarshal(8, buf[offset:], x.BearerToken)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetNodeByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetNodeByPathRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- case 2: // TreeId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TreeId")
- }
- x.TreeId = data
- case 3: // PathAttribute
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "PathAttribute")
- }
- x.PathAttribute = data
- case 4: // Path
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Path")
- }
- x.Path = append(x.Path, data)
- case 5: // Attributes
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Attributes")
- }
- x.Attributes = append(x.Attributes, data)
- case 6: // LatestOnly
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "LatestOnly")
- }
- x.LatestOnly = data
- case 7: // AllAttributes
- data, ok := fc.Bool()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "AllAttributes")
- }
- x.AllAttributes = data
- case 8: // BearerToken
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
- }
- x.BearerToken = data
- }
- }
- return nil
-}
-func (x *GetNodeByPathRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *GetNodeByPathRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-func (x *GetNodeByPathRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-func (x *GetNodeByPathRequest_Body) SetTreeId(v string) {
- x.TreeId = v
-}
-func (x *GetNodeByPathRequest_Body) GetPathAttribute() string {
- if x != nil {
- return x.PathAttribute
- }
- return ""
-}
-func (x *GetNodeByPathRequest_Body) SetPathAttribute(v string) {
- x.PathAttribute = v
-}
-func (x *GetNodeByPathRequest_Body) GetPath() []string {
- if x != nil {
- return x.Path
- }
- return nil
-}
-func (x *GetNodeByPathRequest_Body) SetPath(v []string) {
- x.Path = v
-}
-func (x *GetNodeByPathRequest_Body) GetAttributes() []string {
- if x != nil {
- return x.Attributes
- }
- return nil
-}
-func (x *GetNodeByPathRequest_Body) SetAttributes(v []string) {
- x.Attributes = v
-}
-func (x *GetNodeByPathRequest_Body) GetLatestOnly() bool {
- if x != nil {
- return x.LatestOnly
- }
- return false
-}
-func (x *GetNodeByPathRequest_Body) SetLatestOnly(v bool) {
- x.LatestOnly = v
-}
-func (x *GetNodeByPathRequest_Body) GetAllAttributes() bool {
- if x != nil {
- return x.AllAttributes
- }
- return false
-}
-func (x *GetNodeByPathRequest_Body) SetAllAttributes(v bool) {
- x.AllAttributes = v
-}
-func (x *GetNodeByPathRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-func (x *GetNodeByPathRequest_Body) SetBearerToken(v []byte) {
- x.BearerToken = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetNodeByPathRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
- out.RawString(prefix)
- out.String(x.TreeId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"pathAttribute\":"
- out.RawString(prefix)
- out.String(x.PathAttribute)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"path\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Path {
- if i != 0 {
- out.RawByte(',')
- }
- out.String(x.Path[i])
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"attributes\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Attributes {
- if i != 0 {
- out.RawByte(',')
- }
- out.String(x.Attributes[i])
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"latestOnly\":"
- out.RawString(prefix)
- out.Bool(x.LatestOnly)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"allAttributes\":"
- out.RawString(prefix)
- out.Bool(x.AllAttributes)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
- out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetNodeByPathRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- case "treeId":
- {
- var f string
- f = in.String()
- x.TreeId = f
- }
- case "pathAttribute":
- {
- var f string
- f = in.String()
- x.PathAttribute = f
- }
- case "path":
- {
- var f string
- var list []string
- in.Delim('[')
- for !in.IsDelim(']') {
- f = in.String()
- list = append(list, f)
- in.WantComma()
- }
- x.Path = list
- in.Delim(']')
- }
- case "attributes":
- {
- var f string
- var list []string
- in.Delim('[')
- for !in.IsDelim(']') {
- f = in.String()
- list = append(list, f)
- in.WantComma()
- }
- x.Attributes = list
- in.Delim(']')
- }
- case "latestOnly":
- {
- var f bool
- f = in.Bool()
- x.LatestOnly = f
- }
- case "allAttributes":
- {
- var f bool
- f = in.Bool()
- x.AllAttributes = f
- }
- case "bearerToken":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.BearerToken = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetNodeByPathRequest struct {
- Body *GetNodeByPathRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetNodeByPathRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*GetNodeByPathRequest)(nil)
- _ json.Marshaler = (*GetNodeByPathRequest)(nil)
- _ json.Unmarshaler = (*GetNodeByPathRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3941,6 +781,27 @@ func (x *GetNodeByPathRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetNodeByPathRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3956,178 +817,13 @@ func (x *GetNodeByPathRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetNodeByPathRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetNodeByPathRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *GetNodeByPathRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *GetNodeByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetNodeByPathRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetNodeByPathRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetNodeByPathRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetNodeByPathRequest) GetBody() *GetNodeByPathRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetNodeByPathRequest) SetBody(v *GetNodeByPathRequest_Body) {
- x.Body = v
-}
-func (x *GetNodeByPathRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetNodeByPathRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetNodeByPathRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetNodeByPathRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetNodeByPathRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetNodeByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetNodeByPathRequest_Body
- f = new(GetNodeByPathRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetNodeByPathResponse_Info struct {
- NodeId uint64 `json:"nodeId"`
- Timestamp uint64 `json:"timestamp"`
- Meta []KeyValue `json:"meta"`
- ParentId uint64 `json:"parentId"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetNodeByPathResponse_Info)(nil)
- _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse_Info)(nil)
- _ json.Marshaler = (*GetNodeByPathResponse_Info)(nil)
- _ json.Unmarshaler = (*GetNodeByPathResponse_Info)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -4138,287 +834,37 @@ func (x *GetNodeByPathResponse_Info) StableSize() (size int) {
size += proto.UInt64Size(1, x.NodeId)
size += proto.UInt64Size(2, x.Timestamp)
for i := range x.Meta {
- size += proto.NestedStructureSizeUnchecked(3, &x.Meta[i])
+ size += proto.NestedStructureSize(3, x.Meta[i])
}
size += proto.UInt64Size(4, x.ParentId)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetNodeByPathResponse_Info) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetNodeByPathResponse_Info) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetNodeByPathResponse_Info) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if x.NodeId != 0 {
- mm.AppendUint64(1, x.NodeId)
- }
- if x.Timestamp != 0 {
- mm.AppendUint64(2, x.Timestamp)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.UInt64Marshal(1, buf[offset:], x.NodeId)
+ offset += proto.UInt64Marshal(2, buf[offset:], x.Timestamp)
for i := range x.Meta {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(3))
- }
- if x.ParentId != 0 {
- mm.AppendUint64(4, x.ParentId)
+ offset += proto.NestedStructureMarshal(3, buf[offset:], x.Meta[i])
}
+ offset += proto.UInt64Marshal(4, buf[offset:], x.ParentId)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetNodeByPathResponse_Info) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse_Info")
- }
- switch fc.FieldNum {
- case 1: // NodeId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "NodeId")
- }
- x.NodeId = data
- case 2: // Timestamp
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Timestamp")
- }
- x.Timestamp = data
- case 3: // Meta
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Meta")
- }
- x.Meta = append(x.Meta, KeyValue{})
- ff := &x.Meta[len(x.Meta)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 4: // ParentId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ParentId")
- }
- x.ParentId = data
- }
- }
- return nil
-}
-func (x *GetNodeByPathResponse_Info) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-func (x *GetNodeByPathResponse_Info) SetNodeId(v uint64) {
- x.NodeId = v
-}
-func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 {
- if x != nil {
- return x.Timestamp
- }
- return 0
-}
-func (x *GetNodeByPathResponse_Info) SetTimestamp(v uint64) {
- x.Timestamp = v
-}
-func (x *GetNodeByPathResponse_Info) GetMeta() []KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-func (x *GetNodeByPathResponse_Info) SetMeta(v []KeyValue) {
- x.Meta = v
-}
-func (x *GetNodeByPathResponse_Info) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-func (x *GetNodeByPathResponse_Info) SetParentId(v uint64) {
- x.ParentId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetNodeByPathResponse_Info) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodeId\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"timestamp\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Meta {
- if i != 0 {
- out.RawByte(',')
- }
- x.Meta[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentId\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
- out.RawByte('"')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetNodeByPathResponse_Info) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "nodeId":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.NodeId = f
- }
- case "timestamp":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.Timestamp = f
- }
- case "meta":
- {
- var f KeyValue
- var list []KeyValue
- in.Delim('[')
- for !in.IsDelim(']') {
- f = KeyValue{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Meta = list
- in.Delim(']')
- }
- case "parentId":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.ParentId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetNodeByPathResponse_Body struct {
- Nodes []GetNodeByPathResponse_Info `json:"nodes"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetNodeByPathResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse_Body)(nil)
- _ json.Marshaler = (*GetNodeByPathResponse_Body)(nil)
- _ json.Unmarshaler = (*GetNodeByPathResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -4427,155 +873,33 @@ func (x *GetNodeByPathResponse_Body) StableSize() (size int) {
return 0
}
for i := range x.Nodes {
- size += proto.NestedStructureSizeUnchecked(1, &x.Nodes[i])
+ size += proto.NestedStructureSize(1, x.Nodes[i])
}
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetNodeByPathResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetNodeByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetNodeByPathResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
for i := range x.Nodes {
- x.Nodes[i].EmitProtobuf(mm.AppendMessage(1))
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Nodes[i])
}
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetNodeByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Nodes
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Nodes")
- }
- x.Nodes = append(x.Nodes, GetNodeByPathResponse_Info{})
- ff := &x.Nodes[len(x.Nodes)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetNodeByPathResponse_Body) GetNodes() []GetNodeByPathResponse_Info {
- if x != nil {
- return x.Nodes
- }
- return nil
-}
-func (x *GetNodeByPathResponse_Body) SetNodes(v []GetNodeByPathResponse_Info) {
- x.Nodes = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetNodeByPathResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetNodeByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodes\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Nodes {
- if i != 0 {
- out.RawByte(',')
- }
- x.Nodes[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetNodeByPathResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetNodeByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "nodes":
- {
- var f GetNodeByPathResponse_Info
- var list []GetNodeByPathResponse_Info
- in.Delim('[')
- for !in.IsDelim(']') {
- f = GetNodeByPathResponse_Info{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Nodes = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetNodeByPathResponse struct {
- Body *GetNodeByPathResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetNodeByPathResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse)(nil)
- _ json.Marshaler = (*GetNodeByPathResponse)(nil)
- _ json.Unmarshaler = (*GetNodeByPathResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -4588,6 +912,27 @@ func (x *GetNodeByPathResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetNodeByPathResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -4603,207 +948,13 @@ func (x *GetNodeByPathResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetNodeByPathResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetNodeByPathResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *GetNodeByPathResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *GetNodeByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetNodeByPathResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetNodeByPathResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetNodeByPathResponse) GetBody() *GetNodeByPathResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetNodeByPathResponse) SetBody(v *GetNodeByPathResponse_Body) {
- x.Body = v
-}
-func (x *GetNodeByPathResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetNodeByPathResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetNodeByPathResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetNodeByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetNodeByPathResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetNodeByPathResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetNodeByPathResponse_Body
- f = new(GetNodeByPathResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetSubTreeRequest_Body_Order_Direction int32
-
-const (
- GetSubTreeRequest_Body_Order_None GetSubTreeRequest_Body_Order_Direction = 0
- GetSubTreeRequest_Body_Order_Asc GetSubTreeRequest_Body_Order_Direction = 1
-)
-
-var (
- GetSubTreeRequest_Body_Order_Direction_name = map[int32]string{
- 0: "None",
- 1: "Asc",
- }
- GetSubTreeRequest_Body_Order_Direction_value = map[string]int32{
- "None": 0,
- "Asc": 1,
- }
-)
-
-func (x GetSubTreeRequest_Body_Order_Direction) String() string {
- if v, ok := GetSubTreeRequest_Body_Order_Direction_name[int32(x)]; ok {
- return v
- }
- return strconv.FormatInt(int64(x), 10)
-}
-func (x *GetSubTreeRequest_Body_Order_Direction) FromString(s string) bool {
- if v, ok := GetSubTreeRequest_Body_Order_Direction_value[s]; ok {
- *x = GetSubTreeRequest_Body_Order_Direction(v)
- return true
- }
- return false
-}
-
-type GetSubTreeRequest_Body_Order struct {
- Direction GetSubTreeRequest_Body_Order_Direction `json:"direction"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetSubTreeRequest_Body_Order)(nil)
- _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest_Body_Order)(nil)
- _ json.Marshaler = (*GetSubTreeRequest_Body_Order)(nil)
- _ json.Unmarshaler = (*GetSubTreeRequest_Body_Order)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -4815,156 +966,26 @@ func (x *GetSubTreeRequest_Body_Order) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetSubTreeRequest_Body_Order) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetSubTreeRequest_Body_Order) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetSubTreeRequest_Body_Order) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if int32(x.Direction) != 0 {
- mm.AppendInt32(1, int32(x.Direction))
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.EnumMarshal(1, buf[offset:], int32(x.Direction))
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetSubTreeRequest_Body_Order) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest_Body_Order")
- }
- switch fc.FieldNum {
- case 1: // Direction
- data, ok := fc.Int32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Direction")
- }
- x.Direction = GetSubTreeRequest_Body_Order_Direction(data)
- }
- }
- return nil
-}
-func (x *GetSubTreeRequest_Body_Order) GetDirection() GetSubTreeRequest_Body_Order_Direction {
- if x != nil {
- return x.Direction
- }
- return 0
-}
-func (x *GetSubTreeRequest_Body_Order) SetDirection(v GetSubTreeRequest_Body_Order_Direction) {
- x.Direction = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetSubTreeRequest_Body_Order) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetSubTreeRequest_Body_Order) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"direction\":"
- out.RawString(prefix)
- v := int32(x.Direction)
- if vv, ok := GetSubTreeRequest_Body_Order_Direction_name[v]; ok {
- out.String(vv)
- } else {
- out.Int32(v)
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetSubTreeRequest_Body_Order) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetSubTreeRequest_Body_Order) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "direction":
- {
- var f GetSubTreeRequest_Body_Order_Direction
- var parsedValue GetSubTreeRequest_Body_Order_Direction
- switch v := in.Interface().(type) {
- case string:
- if vv, ok := GetSubTreeRequest_Body_Order_Direction_value[v]; ok {
- parsedValue = GetSubTreeRequest_Body_Order_Direction(vv)
- break
- }
- vv, err := strconv.ParseInt(v, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- parsedValue = GetSubTreeRequest_Body_Order_Direction(vv)
- case float64:
- parsedValue = GetSubTreeRequest_Body_Order_Direction(v)
- }
- f = parsedValue
- x.Direction = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetSubTreeRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- RootId []uint64 `json:"rootId"`
- Depth uint32 `json:"depth"`
- BearerToken []byte `json:"bearerToken"`
- OrderBy *GetSubTreeRequest_Body_Order `json:"orderBy"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetSubTreeRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest_Body)(nil)
- _ json.Marshaler = (*GetSubTreeRequest_Body)(nil)
- _ json.Unmarshaler = (*GetSubTreeRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -4974,366 +995,38 @@ func (x *GetSubTreeRequest_Body) StableSize() (size int) {
}
size += proto.BytesSize(1, x.ContainerId)
size += proto.StringSize(2, x.TreeId)
- for i := range x.RootId {
- size += protowire.SizeGroup(protowire.Number(3), protowire.SizeVarint(x.RootId[i]))
- }
+ size += proto.UInt64Size(3, x.RootId)
size += proto.UInt32Size(4, x.Depth)
size += proto.BytesSize(5, x.BearerToken)
size += proto.NestedStructureSize(6, x.OrderBy)
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetSubTreeRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetSubTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetSubTreeRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
- }
- if len(x.TreeId) != 0 {
- mm.AppendString(2, x.TreeId)
- }
- for j := range x.RootId {
- mm.AppendUint64(3, x.RootId[j])
- }
- if x.Depth != 0 {
- mm.AppendUint32(4, x.Depth)
- }
- if len(x.BearerToken) != 0 {
- mm.AppendBytes(5, x.BearerToken)
- }
- if x.OrderBy != nil {
- x.OrderBy.EmitProtobuf(mm.AppendMessage(6))
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
+ offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
+ offset += proto.UInt64Marshal(3, buf[offset:], x.RootId)
+ offset += proto.UInt32Marshal(4, buf[offset:], x.Depth)
+ offset += proto.BytesMarshal(5, buf[offset:], x.BearerToken)
+ offset += proto.NestedStructureMarshal(6, buf[offset:], x.OrderBy)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetSubTreeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- case 2: // TreeId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TreeId")
- }
- x.TreeId = data
- case 3: // RootId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "RootId")
- }
- x.RootId = append(x.RootId, data)
- case 4: // Depth
- data, ok := fc.Uint32()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Depth")
- }
- x.Depth = data
- case 5: // BearerToken
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
- }
- x.BearerToken = data
- case 6: // OrderBy
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "OrderBy")
- }
- x.OrderBy = new(GetSubTreeRequest_Body_Order)
- if err := x.OrderBy.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetSubTreeRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *GetSubTreeRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-func (x *GetSubTreeRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-func (x *GetSubTreeRequest_Body) SetTreeId(v string) {
- x.TreeId = v
-}
-func (x *GetSubTreeRequest_Body) GetRootId() []uint64 {
- if x != nil {
- return x.RootId
- }
- return nil
-}
-func (x *GetSubTreeRequest_Body) SetRootId(v []uint64) {
- x.RootId = v
-}
-func (x *GetSubTreeRequest_Body) GetDepth() uint32 {
- if x != nil {
- return x.Depth
- }
- return 0
-}
-func (x *GetSubTreeRequest_Body) SetDepth(v uint32) {
- x.Depth = v
-}
-func (x *GetSubTreeRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-func (x *GetSubTreeRequest_Body) SetBearerToken(v []byte) {
- x.BearerToken = v
-}
-func (x *GetSubTreeRequest_Body) GetOrderBy() *GetSubTreeRequest_Body_Order {
- if x != nil {
- return x.OrderBy
- }
- return nil
-}
-func (x *GetSubTreeRequest_Body) SetOrderBy(v *GetSubTreeRequest_Body_Order) {
- x.OrderBy = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetSubTreeRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetSubTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
- out.RawString(prefix)
- out.String(x.TreeId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"rootId\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.RootId {
- if i != 0 {
- out.RawByte(',')
- }
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.RootId[i], 10)
- out.RawByte('"')
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"depth\":"
- out.RawString(prefix)
- out.Uint32(x.Depth)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"bearerToken\":"
- out.RawString(prefix)
- if x.BearerToken != nil {
- out.Base64Bytes(x.BearerToken)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"orderBy\":"
- out.RawString(prefix)
- x.OrderBy.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetSubTreeRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- case "treeId":
- {
- var f string
- f = in.String()
- x.TreeId = f
- }
- case "rootId":
- {
- var f uint64
- var list []uint64
- in.Delim('[')
- for !in.IsDelim(']') {
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- list = append(list, f)
- in.WantComma()
- }
- x.RootId = list
- in.Delim(']')
- }
- case "depth":
- {
- var f uint32
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 32)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint32(v)
- f = pv
- x.Depth = f
- }
- case "bearerToken":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.BearerToken = f
- }
- case "orderBy":
- {
- var f *GetSubTreeRequest_Body_Order
- f = new(GetSubTreeRequest_Body_Order)
- f.UnmarshalEasyJSON(in)
- x.OrderBy = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetSubTreeRequest struct {
- Body *GetSubTreeRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetSubTreeRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest)(nil)
- _ json.Marshaler = (*GetSubTreeRequest)(nil)
- _ json.Unmarshaler = (*GetSubTreeRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -5346,6 +1039,27 @@ func (x *GetSubTreeRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetSubTreeRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -5361,178 +1075,13 @@ func (x *GetSubTreeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetSubTreeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetSubTreeRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *GetSubTreeRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *GetSubTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetSubTreeRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetSubTreeRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetSubTreeRequest) GetBody() *GetSubTreeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetSubTreeRequest) SetBody(v *GetSubTreeRequest_Body) {
- x.Body = v
-}
-func (x *GetSubTreeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetSubTreeRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetSubTreeRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetSubTreeRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetSubTreeRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetSubTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetSubTreeRequest_Body
- f = new(GetSubTreeRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetSubTreeResponse_Body struct {
- NodeId []uint64 `json:"nodeId"`
- ParentId []uint64 `json:"parentId"`
- Timestamp []uint64 `json:"timestamp"`
- Meta []KeyValue `json:"meta"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetSubTreeResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetSubTreeResponse_Body)(nil)
- _ json.Marshaler = (*GetSubTreeResponse_Body)(nil)
- _ json.Unmarshaler = (*GetSubTreeResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -5540,339 +1089,40 @@ func (x *GetSubTreeResponse_Body) StableSize() (size int) {
if x == nil {
return 0
}
- for i := range x.NodeId {
- size += protowire.SizeGroup(protowire.Number(1), protowire.SizeVarint(x.NodeId[i]))
- }
- for i := range x.ParentId {
- size += protowire.SizeGroup(protowire.Number(2), protowire.SizeVarint(x.ParentId[i]))
- }
- for i := range x.Timestamp {
- size += protowire.SizeGroup(protowire.Number(3), protowire.SizeVarint(x.Timestamp[i]))
- }
+ size += proto.UInt64Size(1, x.NodeId)
+ size += proto.UInt64Size(2, x.ParentId)
+ size += proto.UInt64Size(3, x.Timestamp)
for i := range x.Meta {
- size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i])
+ size += proto.NestedStructureSize(4, x.Meta[i])
}
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetSubTreeResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetSubTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetSubTreeResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- for j := range x.NodeId {
- mm.AppendUint64(1, x.NodeId[j])
- }
- for j := range x.ParentId {
- mm.AppendUint64(2, x.ParentId[j])
- }
- for j := range x.Timestamp {
- mm.AppendUint64(3, x.Timestamp[j])
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.UInt64Marshal(1, buf[offset:], x.NodeId)
+ offset += proto.UInt64Marshal(2, buf[offset:], x.ParentId)
+ offset += proto.UInt64Marshal(3, buf[offset:], x.Timestamp)
for i := range x.Meta {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
+ offset += proto.NestedStructureMarshal(4, buf[offset:], x.Meta[i])
}
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetSubTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetSubTreeResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // NodeId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "NodeId")
- }
- x.NodeId = append(x.NodeId, data)
- case 2: // ParentId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ParentId")
- }
- x.ParentId = append(x.ParentId, data)
- case 3: // Timestamp
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Timestamp")
- }
- x.Timestamp = append(x.Timestamp, data)
- case 4: // Meta
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Meta")
- }
- x.Meta = append(x.Meta, KeyValue{})
- ff := &x.Meta[len(x.Meta)-1]
- if err := ff.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetSubTreeResponse_Body) GetNodeId() []uint64 {
- if x != nil {
- return x.NodeId
- }
- return nil
-}
-func (x *GetSubTreeResponse_Body) SetNodeId(v []uint64) {
- x.NodeId = v
-}
-func (x *GetSubTreeResponse_Body) GetParentId() []uint64 {
- if x != nil {
- return x.ParentId
- }
- return nil
-}
-func (x *GetSubTreeResponse_Body) SetParentId(v []uint64) {
- x.ParentId = v
-}
-func (x *GetSubTreeResponse_Body) GetTimestamp() []uint64 {
- if x != nil {
- return x.Timestamp
- }
- return nil
-}
-func (x *GetSubTreeResponse_Body) SetTimestamp(v []uint64) {
- x.Timestamp = v
-}
-func (x *GetSubTreeResponse_Body) GetMeta() []KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-func (x *GetSubTreeResponse_Body) SetMeta(v []KeyValue) {
- x.Meta = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetSubTreeResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetSubTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"nodeId\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.NodeId {
- if i != 0 {
- out.RawByte(',')
- }
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId[i], 10)
- out.RawByte('"')
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentId\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.ParentId {
- if i != 0 {
- out.RawByte(',')
- }
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId[i], 10)
- out.RawByte('"')
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"timestamp\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Timestamp {
- if i != 0 {
- out.RawByte(',')
- }
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp[i], 10)
- out.RawByte('"')
- }
- out.RawByte(']')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Meta {
- if i != 0 {
- out.RawByte(',')
- }
- x.Meta[i].MarshalEasyJSON(out)
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetSubTreeResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "nodeId":
- {
- var f uint64
- var list []uint64
- in.Delim('[')
- for !in.IsDelim(']') {
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- list = append(list, f)
- in.WantComma()
- }
- x.NodeId = list
- in.Delim(']')
- }
- case "parentId":
- {
- var f uint64
- var list []uint64
- in.Delim('[')
- for !in.IsDelim(']') {
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- list = append(list, f)
- in.WantComma()
- }
- x.ParentId = list
- in.Delim(']')
- }
- case "timestamp":
- {
- var f uint64
- var list []uint64
- in.Delim('[')
- for !in.IsDelim(']') {
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- list = append(list, f)
- in.WantComma()
- }
- x.Timestamp = list
- in.Delim(']')
- }
- case "meta":
- {
- var f KeyValue
- var list []KeyValue
- in.Delim('[')
- for !in.IsDelim(']') {
- f = KeyValue{}
- f.UnmarshalEasyJSON(in)
- list = append(list, f)
- in.WantComma()
- }
- x.Meta = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetSubTreeResponse struct {
- Body *GetSubTreeResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetSubTreeResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*GetSubTreeResponse)(nil)
- _ json.Marshaler = (*GetSubTreeResponse)(nil)
- _ json.Unmarshaler = (*GetSubTreeResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -5885,6 +1135,27 @@ func (x *GetSubTreeResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetSubTreeResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -5900,175 +1171,13 @@ func (x *GetSubTreeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetSubTreeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetSubTreeResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *GetSubTreeResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *GetSubTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetSubTreeResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetSubTreeResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetSubTreeResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetSubTreeResponse) GetBody() *GetSubTreeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetSubTreeResponse) SetBody(v *GetSubTreeResponse_Body) {
- x.Body = v
-}
-func (x *GetSubTreeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetSubTreeResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetSubTreeResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetSubTreeResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetSubTreeResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetSubTreeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetSubTreeResponse_Body
- f = new(GetSubTreeResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type TreeListRequest_Body struct {
- ContainerId []byte `json:"containerId"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*TreeListRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*TreeListRequest_Body)(nil)
- _ json.Marshaler = (*TreeListRequest_Body)(nil)
- _ json.Unmarshaler = (*TreeListRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6080,141 +1189,26 @@ func (x *TreeListRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *TreeListRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *TreeListRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *TreeListRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *TreeListRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "TreeListRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- }
- }
- return nil
-}
-func (x *TreeListRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *TreeListRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *TreeListRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *TreeListRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *TreeListRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *TreeListRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type TreeListRequest struct {
- Body *TreeListRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*TreeListRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*TreeListRequest)(nil)
- _ json.Marshaler = (*TreeListRequest)(nil)
- _ json.Unmarshaler = (*TreeListRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6227,6 +1221,27 @@ func (x *TreeListRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *TreeListRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -6242,175 +1257,13 @@ func (x *TreeListRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *TreeListRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *TreeListRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *TreeListRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *TreeListRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *TreeListRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "TreeListRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(TreeListRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *TreeListRequest) GetBody() *TreeListRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *TreeListRequest) SetBody(v *TreeListRequest_Body) {
- x.Body = v
-}
-func (x *TreeListRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *TreeListRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *TreeListRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *TreeListRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *TreeListRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *TreeListRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *TreeListRequest_Body
- f = new(TreeListRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type TreeListResponse_Body struct {
- Ids []string `json:"ids"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*TreeListResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*TreeListResponse_Body)(nil)
- _ json.Marshaler = (*TreeListResponse_Body)(nil)
- _ json.Unmarshaler = (*TreeListResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6422,145 +1275,26 @@ func (x *TreeListResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *TreeListResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *TreeListResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *TreeListResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- for j := range x.Ids {
- mm.AppendString(1, x.Ids[j])
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.RepeatedStringMarshal(1, buf[offset:], x.Ids)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *TreeListResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "TreeListResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Ids
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Ids")
- }
- x.Ids = append(x.Ids, data)
- }
- }
- return nil
-}
-func (x *TreeListResponse_Body) GetIds() []string {
- if x != nil {
- return x.Ids
- }
- return nil
-}
-func (x *TreeListResponse_Body) SetIds(v []string) {
- x.Ids = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *TreeListResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *TreeListResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"ids\":"
- out.RawString(prefix)
- out.RawByte('[')
- for i := range x.Ids {
- if i != 0 {
- out.RawByte(',')
- }
- out.String(x.Ids[i])
- }
- out.RawByte(']')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *TreeListResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *TreeListResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "ids":
- {
- var f string
- var list []string
- in.Delim('[')
- for !in.IsDelim(']') {
- f = in.String()
- list = append(list, f)
- in.WantComma()
- }
- x.Ids = list
- in.Delim(']')
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type TreeListResponse struct {
- Body *TreeListResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*TreeListResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*TreeListResponse)(nil)
- _ json.Marshaler = (*TreeListResponse)(nil)
- _ json.Unmarshaler = (*TreeListResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6573,6 +1307,27 @@ func (x *TreeListResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *TreeListResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -6588,177 +1343,13 @@ func (x *TreeListResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *TreeListResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *TreeListResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *TreeListResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *TreeListResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *TreeListResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "TreeListResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(TreeListResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *TreeListResponse) GetBody() *TreeListResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *TreeListResponse) SetBody(v *TreeListResponse_Body) {
- x.Body = v
-}
-func (x *TreeListResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *TreeListResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *TreeListResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *TreeListResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *TreeListResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *TreeListResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *TreeListResponse_Body
- f = new(TreeListResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ApplyRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- Operation *LogMove `json:"operation"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ApplyRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ApplyRequest_Body)(nil)
- _ json.Marshaler = (*ApplyRequest_Body)(nil)
- _ json.Unmarshaler = (*ApplyRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6772,213 +1363,28 @@ func (x *ApplyRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ApplyRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *ApplyRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *ApplyRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
- }
- if len(x.TreeId) != 0 {
- mm.AppendString(2, x.TreeId)
- }
- if x.Operation != nil {
- x.Operation.EmitProtobuf(mm.AppendMessage(3))
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
+ offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
+ offset += proto.NestedStructureMarshal(3, buf[offset:], x.Operation)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ApplyRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ApplyRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- case 2: // TreeId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TreeId")
- }
- x.TreeId = data
- case 3: // Operation
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Operation")
- }
- x.Operation = new(LogMove)
- if err := x.Operation.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ApplyRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *ApplyRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-func (x *ApplyRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-func (x *ApplyRequest_Body) SetTreeId(v string) {
- x.TreeId = v
-}
-func (x *ApplyRequest_Body) GetOperation() *LogMove {
- if x != nil {
- return x.Operation
- }
- return nil
-}
-func (x *ApplyRequest_Body) SetOperation(v *LogMove) {
- x.Operation = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ApplyRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ApplyRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
- out.RawString(prefix)
- out.String(x.TreeId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"operation\":"
- out.RawString(prefix)
- x.Operation.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ApplyRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ApplyRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- case "treeId":
- {
- var f string
- f = in.String()
- x.TreeId = f
- }
- case "operation":
- {
- var f *LogMove
- f = new(LogMove)
- f.UnmarshalEasyJSON(in)
- x.Operation = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ApplyRequest struct {
- Body *ApplyRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ApplyRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*ApplyRequest)(nil)
- _ json.Marshaler = (*ApplyRequest)(nil)
- _ json.Unmarshaler = (*ApplyRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -6991,6 +1397,27 @@ func (x *ApplyRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *ApplyRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -7006,174 +1433,13 @@ func (x *ApplyRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ApplyRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ApplyRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *ApplyRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *ApplyRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ApplyRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ApplyRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ApplyRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ApplyRequest) GetBody() *ApplyRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ApplyRequest) SetBody(v *ApplyRequest_Body) {
- x.Body = v
-}
-func (x *ApplyRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ApplyRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ApplyRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ApplyRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ApplyRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ApplyRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ApplyRequest_Body
- f = new(ApplyRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ApplyResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ApplyResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*ApplyResponse_Body)(nil)
- _ json.Marshaler = (*ApplyResponse_Body)(nil)
- _ json.Unmarshaler = (*ApplyResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -7184,93 +1450,18 @@ func (x *ApplyResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ApplyResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *ApplyResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *ApplyResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ApplyResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ApplyResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ApplyResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ApplyResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ApplyResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ApplyResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type ApplyResponse struct {
- Body *ApplyResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*ApplyResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*ApplyResponse)(nil)
- _ json.Marshaler = (*ApplyResponse)(nil)
- _ json.Unmarshaler = (*ApplyResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -7283,6 +1474,27 @@ func (x *ApplyResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *ApplyResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -7298,178 +1510,13 @@ func (x *ApplyResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ApplyResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *ApplyResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *ApplyResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *ApplyResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *ApplyResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "ApplyResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(ApplyResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *ApplyResponse) GetBody() *ApplyResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *ApplyResponse) SetBody(v *ApplyResponse_Body) {
- x.Body = v
-}
-func (x *ApplyResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *ApplyResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *ApplyResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *ApplyResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *ApplyResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *ApplyResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *ApplyResponse_Body
- f = new(ApplyResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetOpLogRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- Height uint64 `json:"height"`
- Count uint64 `json:"count"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetOpLogRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetOpLogRequest_Body)(nil)
- _ json.Marshaler = (*GetOpLogRequest_Body)(nil)
- _ json.Unmarshaler = (*GetOpLogRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -7484,263 +1531,29 @@ func (x *GetOpLogRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetOpLogRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetOpLogRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetOpLogRequest_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.ContainerId) != 0 {
- mm.AppendBytes(1, x.ContainerId)
- }
- if len(x.TreeId) != 0 {
- mm.AppendString(2, x.TreeId)
- }
- if x.Height != 0 {
- mm.AppendUint64(3, x.Height)
- }
- if x.Count != 0 {
- mm.AppendUint64(4, x.Count)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
+ offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
+ offset += proto.UInt64Marshal(3, buf[offset:], x.Height)
+ offset += proto.UInt64Marshal(4, buf[offset:], x.Count)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetOpLogRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetOpLogRequest_Body")
- }
- switch fc.FieldNum {
- case 1: // ContainerId
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
- }
- x.ContainerId = data
- case 2: // TreeId
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "TreeId")
- }
- x.TreeId = data
- case 3: // Height
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Height")
- }
- x.Height = data
- case 4: // Count
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Count")
- }
- x.Count = data
- }
- }
- return nil
-}
-func (x *GetOpLogRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-func (x *GetOpLogRequest_Body) SetContainerId(v []byte) {
- x.ContainerId = v
-}
-func (x *GetOpLogRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-func (x *GetOpLogRequest_Body) SetTreeId(v string) {
- x.TreeId = v
-}
-func (x *GetOpLogRequest_Body) GetHeight() uint64 {
- if x != nil {
- return x.Height
- }
- return 0
-}
-func (x *GetOpLogRequest_Body) SetHeight(v uint64) {
- x.Height = v
-}
-func (x *GetOpLogRequest_Body) GetCount() uint64 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-func (x *GetOpLogRequest_Body) SetCount(v uint64) {
- x.Count = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetOpLogRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetOpLogRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"containerId\":"
- out.RawString(prefix)
- if x.ContainerId != nil {
- out.Base64Bytes(x.ContainerId)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"treeId\":"
- out.RawString(prefix)
- out.String(x.TreeId)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"height\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"count\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Count, 10)
- out.RawByte('"')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetOpLogRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "containerId":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.ContainerId = f
- }
- case "treeId":
- {
- var f string
- f = in.String()
- x.TreeId = f
- }
- case "height":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.Height = f
- }
- case "count":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.Count = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetOpLogRequest struct {
- Body *GetOpLogRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetOpLogRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*GetOpLogRequest)(nil)
- _ json.Marshaler = (*GetOpLogRequest)(nil)
- _ json.Unmarshaler = (*GetOpLogRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -7753,6 +1566,27 @@ func (x *GetOpLogRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetOpLogRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -7768,175 +1602,13 @@ func (x *GetOpLogRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetOpLogRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetOpLogRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *GetOpLogRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *GetOpLogRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetOpLogRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetOpLogRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetOpLogRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetOpLogRequest) GetBody() *GetOpLogRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetOpLogRequest) SetBody(v *GetOpLogRequest_Body) {
- x.Body = v
-}
-func (x *GetOpLogRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetOpLogRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetOpLogRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetOpLogRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetOpLogRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetOpLogRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetOpLogRequest_Body
- f = new(GetOpLogRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetOpLogResponse_Body struct {
- Operation *LogMove `json:"operation"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetOpLogResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*GetOpLogResponse_Body)(nil)
- _ json.Marshaler = (*GetOpLogResponse_Body)(nil)
- _ json.Unmarshaler = (*GetOpLogResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -7948,135 +1620,26 @@ func (x *GetOpLogResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetOpLogResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *GetOpLogResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetOpLogResponse_Body) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if x.Operation != nil {
- x.Operation.EmitProtobuf(mm.AppendMessage(1))
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Operation)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetOpLogResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetOpLogResponse_Body")
- }
- switch fc.FieldNum {
- case 1: // Operation
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Operation")
- }
- x.Operation = new(LogMove)
- if err := x.Operation.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetOpLogResponse_Body) GetOperation() *LogMove {
- if x != nil {
- return x.Operation
- }
- return nil
-}
-func (x *GetOpLogResponse_Body) SetOperation(v *LogMove) {
- x.Operation = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetOpLogResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetOpLogResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"operation\":"
- out.RawString(prefix)
- x.Operation.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetOpLogResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetOpLogResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "operation":
- {
- var f *LogMove
- f = new(LogMove)
- f.UnmarshalEasyJSON(in)
- x.Operation = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type GetOpLogResponse struct {
- Body *GetOpLogResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*GetOpLogResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*GetOpLogResponse)(nil)
- _ json.Marshaler = (*GetOpLogResponse)(nil)
- _ json.Unmarshaler = (*GetOpLogResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -8089,6 +1652,27 @@ func (x *GetOpLogResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *GetOpLogResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -8104,174 +1688,13 @@ func (x *GetOpLogResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetOpLogResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *GetOpLogResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *GetOpLogResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *GetOpLogResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *GetOpLogResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "GetOpLogResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(GetOpLogResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *GetOpLogResponse) GetBody() *GetOpLogResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *GetOpLogResponse) SetBody(v *GetOpLogResponse_Body) {
- x.Body = v
-}
-func (x *GetOpLogResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *GetOpLogResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *GetOpLogResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *GetOpLogResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *GetOpLogResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *GetOpLogResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *GetOpLogResponse_Body
- f = new(GetOpLogResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type HealthcheckResponse_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthcheckResponse_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthcheckResponse_Body)(nil)
- _ json.Marshaler = (*HealthcheckResponse_Body)(nil)
- _ json.Unmarshaler = (*HealthcheckResponse_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -8282,93 +1705,18 @@ func (x *HealthcheckResponse_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthcheckResponse_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthcheckResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *HealthcheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthcheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthcheckResponse_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthcheckResponse_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthcheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthcheckResponse_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthcheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type HealthcheckResponse struct {
- Body *HealthcheckResponse_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthcheckResponse)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthcheckResponse)(nil)
- _ json.Marshaler = (*HealthcheckResponse)(nil)
- _ json.Unmarshaler = (*HealthcheckResponse)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -8381,6 +1729,27 @@ func (x *HealthcheckResponse) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthcheckResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -8396,174 +1765,13 @@ func (x *HealthcheckResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthcheckResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthcheckResponse) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+func (x *HealthcheckResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
}
-func (x *HealthcheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthcheckResponse) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthcheckResponse")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(HealthcheckResponse_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *HealthcheckResponse) GetBody() *HealthcheckResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *HealthcheckResponse) SetBody(v *HealthcheckResponse_Body) {
- x.Body = v
-}
-func (x *HealthcheckResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *HealthcheckResponse) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthcheckResponse) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthcheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthcheckResponse) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthcheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *HealthcheckResponse_Body
- f = new(HealthcheckResponse_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type HealthcheckRequest_Body struct {
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthcheckRequest_Body)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthcheckRequest_Body)(nil)
- _ json.Marshaler = (*HealthcheckRequest_Body)(nil)
- _ json.Unmarshaler = (*HealthcheckRequest_Body)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -8574,93 +1782,18 @@ func (x *HealthcheckRequest_Body) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthcheckRequest_Body) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthcheckRequest_Body) StableMarshal(buf []byte) []byte {
+ return buf
}
-func (x *HealthcheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthcheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthcheckRequest_Body")
- }
- switch fc.FieldNum {
- }
- }
- return nil
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthcheckRequest_Body) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthcheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- out.RawByte('{')
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthcheckRequest_Body) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthcheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type HealthcheckRequest struct {
- Body *HealthcheckRequest_Body `json:"body"`
- Signature *Signature `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*HealthcheckRequest)(nil)
- _ encoding.ProtoUnmarshaler = (*HealthcheckRequest)(nil)
- _ json.Marshaler = (*HealthcheckRequest)(nil)
- _ json.Unmarshaler = (*HealthcheckRequest)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -8673,6 +1806,27 @@ func (x *HealthcheckRequest) StableSize() (size int) {
return size
}
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *HealthcheckRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -8688,160 +1842,9 @@ func (x *HealthcheckRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthcheckRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().MarshalProtobuf(buf), nil
+ return x.GetBody().StableMarshal(buf), nil
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *HealthcheckRequest) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *HealthcheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
- if x == nil {
- return
- }
- if x.Body != nil {
- x.Body.EmitProtobuf(mm.AppendMessage(1))
- }
- if x.Signature != nil {
- x.Signature.EmitProtobuf(mm.AppendMessage(2))
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *HealthcheckRequest) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "HealthcheckRequest")
- }
- switch fc.FieldNum {
- case 1: // Body
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Body")
- }
- x.Body = new(HealthcheckRequest_Body)
- if err := x.Body.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- case 2: // Signature
- data, ok := fc.MessageData()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Signature")
- }
- x.Signature = new(Signature)
- if err := x.Signature.UnmarshalProtobuf(data); err != nil {
- return fmt.Errorf("unmarshal: %w", err)
- }
- }
- }
- return nil
-}
-func (x *HealthcheckRequest) GetBody() *HealthcheckRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-func (x *HealthcheckRequest) SetBody(v *HealthcheckRequest_Body) {
- x.Body = v
-}
-func (x *HealthcheckRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-func (x *HealthcheckRequest) SetSignature(v *Signature) {
- x.Signature = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *HealthcheckRequest) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *HealthcheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"body\":"
- out.RawString(prefix)
- x.Body.MarshalEasyJSON(out)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- x.Signature.MarshalEasyJSON(out)
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *HealthcheckRequest) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *HealthcheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "body":
- {
- var f *HealthcheckRequest_Body
- f = new(HealthcheckRequest_Body)
- f.UnmarshalEasyJSON(in)
- x.Body = f
- }
- case "signature":
- {
- var f *Signature
- f = new(Signature)
- f.UnmarshalEasyJSON(in)
- x.Signature = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
+func (x *HealthcheckRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
}
diff --git a/pkg/services/tree/service_grpc.pb.go b/pkg/services/tree/service_grpc.pb.go
index 63f96e11a..2c0828951 100644
--- a/pkg/services/tree/service_grpc.pb.go
+++ b/pkg/services/tree/service_grpc.pb.go
@@ -4,7 +4,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.25.0
+// - protoc v3.21.9
// source: pkg/services/tree/service.proto
package tree
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 8221a4546..6dad22774 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -2,23 +2,24 @@ package tree
import (
"bytes"
- "context"
"crypto/ecdsa"
"crypto/elliptic"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "go.uber.org/zap"
)
type message interface {
@@ -28,17 +29,24 @@ type message interface {
SetSignature(*Signature)
}
-var (
- errBearerWrongContainer = errors.New("bearer token is created for another container")
- errBearerSignature = errors.New("invalid bearer token signature")
-)
+func basicACLErr(op acl.Op) error {
+ return fmt.Errorf("access to operation %s is denied by basic ACL check", op)
+}
+
+func eACLErr(op eacl.Operation, err error) error {
+ return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err)
+}
+
+var errBearerWrongOwner = errors.New("bearer token must be signed by the container owner")
+var errBearerWrongContainer = errors.New("bearer token is created for another container")
+var errBearerSignature = errors.New("invalid bearer token signature")
// verifyClient verifies if the request for a client operation
// was signed by a key allowed by (e)ACL rules.
// Operation must be one of:
// - 1. ObjectPut;
// - 2. ObjectGet.
-func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error {
+func (s *Service) verifyClient(req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error {
err := verifyMessage(req)
if err != nil {
return err
@@ -49,37 +57,65 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return err
}
- cnr, err := s.cnrSource.Get(ctx, cid)
+ cnr, err := s.cnrSource.Get(cid)
if err != nil {
return fmt.Errorf("can't get container %s: %w", cid, err)
}
- bt, err := parseBearer(rawBearer, cid)
+ eaclOp := eACLOp(op)
+
+ bt, err := parseBearer(rawBearer, cid, eaclOp)
if err != nil {
- return fmt.Errorf("access to operation %s is denied: %w", op, err)
+ return err
}
- role, pubKey, err := roleAndPubKeyFromReq(cnr, req, bt)
+ role, err := roleFromReq(cnr, req, bt)
if err != nil {
return fmt.Errorf("can't get request role: %w", err)
}
- if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil {
- return apeErr(err)
- }
- return nil
-}
+ basicACL := cnr.Value.BasicACL()
-func apeErr(err error) error {
- var chRouterErr *checkercore.ChainRouterError
- if !errors.As(err, &chRouterErr) {
- errServerInternal := &apistatus.ServerInternal{}
- apistatus.WriteInternalServerErr(errServerInternal, err)
- return errServerInternal
+ if !basicACL.IsOpAllowed(op, role) {
+ return basicACLErr(op)
}
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(err.Error())
- return errAccessDenied
+
+ if !basicACL.Extendable() {
+ return nil
+ }
+
+ var useBearer bool
+ if len(rawBearer) != 0 {
+ if !basicACL.AllowedBearerRules(op) {
+ s.log.Debug(logs.TreeBearerPresentedButNotAllowedByACL,
+ zap.String("cid", cid.EncodeToString()),
+ zap.Stringer("op", op),
+ )
+ } else {
+ useBearer = true
+ }
+ }
+
+ var tb eacl.Table
+ signer := req.GetSignature().GetKey()
+ if useBearer && !bt.Impersonate() {
+ if !bearer.ResolveIssuer(*bt).Equals(cnr.Value.Owner()) {
+ return eACLErr(eaclOp, errBearerWrongOwner)
+ }
+ tb = bt.EACLTable()
+ } else {
+ tbCore, err := s.eaclSource.GetEACL(cid)
+ if err != nil {
+ return handleGetEACLError(err)
+ }
+ tb = *tbCore.Value
+
+ if useBearer && bt.Impersonate() {
+ signer = bt.SigningKeyBytes()
+ }
+ }
+
+ return checkEACL(tb, signer, eACLRole(role), eaclOp)
}
// Returns true iff the operation is read-only and request was signed
@@ -95,32 +131,40 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) {
}
key := sign.GetKey()
- for _, currentKey := range *s.authorizedKeys.Load() {
- if bytes.Equal(currentKey, key) {
+ for i := range s.authorizedKeys {
+ if bytes.Equal(s.authorizedKeys[i], key) {
return true, nil
}
}
return false, nil
}
-func parseBearer(rawBearer []byte, cid cidSDK.ID) (*bearer.Token, error) {
+func parseBearer(rawBearer []byte, cid cidSDK.ID, eaclOp eacl.Operation) (*bearer.Token, error) {
if len(rawBearer) == 0 {
return nil, nil
}
bt := new(bearer.Token)
if err := bt.Unmarshal(rawBearer); err != nil {
- return nil, fmt.Errorf("invalid bearer token: %w", err)
+ return nil, eACLErr(eaclOp, fmt.Errorf("invalid bearer token: %w", err))
}
if !bt.AssertContainer(cid) {
- return nil, errBearerWrongContainer
+ return nil, eACLErr(eaclOp, errBearerWrongContainer)
}
if !bt.VerifySignature() {
- return nil, errBearerSignature
+ return nil, eACLErr(eaclOp, errBearerSignature)
}
return bt, nil
}
+func handleGetEACLError(err error) error {
+ if client.IsErrEACLNotFound(err) {
+ return nil
+ }
+
+ return fmt.Errorf("get eACL table: %w", err)
+}
+
func verifyMessage(m message) error {
binBody, err := m.ReadSignedData(nil)
if err != nil {
@@ -171,7 +215,7 @@ func SignMessage(m message, key *ecdsa.PrivateKey) error {
return nil
}
-func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (acl.Role, *keys.PublicKey, error) {
+func roleFromReq(cnr *core.Container, req message, bt *bearer.Token) (acl.Role, error) {
role := acl.RoleOthers
owner := cnr.Value.Owner()
@@ -182,7 +226,7 @@ func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (a
pub, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256())
if err != nil {
- return role, nil, fmt.Errorf("invalid public key: %w", err)
+ return role, fmt.Errorf("invalid public key: %w", err)
}
var reqSigner user.ID
@@ -192,5 +236,84 @@ func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (a
role = acl.RoleOwner
}
- return role, pub, nil
+ return role, nil
+}
+
+func eACLOp(op acl.Op) eacl.Operation {
+ switch op {
+ case acl.OpObjectGet:
+ return eacl.OperationGet
+ case acl.OpObjectPut:
+ return eacl.OperationPut
+ default:
+ panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op))
+ }
+}
+
+func eACLRole(role acl.Role) eacl.Role {
+ switch role {
+ case acl.RoleOwner:
+ return eacl.RoleUser
+ case acl.RoleOthers:
+ return eacl.RoleOthers
+ default:
+ panic(fmt.Sprintf("unexpected tree service ACL role: %s", role))
+ }
+}
+
+var errDENY = errors.New("DENY eACL rule")
+var errNoAllowRules = errors.New("not found allowing rules for the request")
+
+// checkEACL searches for the eACL rules that could be applied to the request
+// (a tuple of a signer key, his FrostFS role and a request operation).
+// It does not filter the request by the filters of the eACL table since tree
+// requests do not contain any "object" information that could be filtered and,
+// therefore, filtering leads to unexpected results.
+// The code was copied with the minor updates from the SDK repo:
+// https://github.com/nspcc-dev/frostfs-sdk-go/blob/43a57d42dd50dc60465bfd3482f7f12bcfcf3411/eacl/validator.go#L28.
+func checkEACL(tb eacl.Table, signer []byte, role eacl.Role, op eacl.Operation) error {
+ for _, record := range tb.Records() {
+ // check type of operation
+ if record.Operation() != op {
+ continue
+ }
+
+ // check target
+ if !targetMatches(record, role, signer) {
+ continue
+ }
+
+ switch a := record.Action(); a {
+ case eacl.ActionAllow:
+ return nil
+ case eacl.ActionDeny:
+ return eACLErr(op, errDENY)
+ default:
+ return eACLErr(op, fmt.Errorf("unexpected action: %s", a))
+ }
+ }
+
+ return eACLErr(op, errNoAllowRules)
+}
+
+func targetMatches(rec eacl.Record, role eacl.Role, signer []byte) bool {
+ for _, target := range rec.Targets() {
+ // check public key match
+ if pubs := target.BinaryKeys(); len(pubs) != 0 {
+ for _, key := range pubs {
+ if bytes.Equal(key, signer) {
+ return true
+ }
+ }
+
+ continue
+ }
+
+ // check target group match
+ if role == target.Role() {
+ return true
+ }
+ }
+
+ return false
}
diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go
index 13a5c1395..1449d5756 100644
--- a/pkg/services/tree/signature_test.go
+++ b/pkg/services/tree/signature_test.go
@@ -1,73 +1,34 @@
package tree
import (
- "context"
"crypto/ecdsa"
"crypto/sha256"
- "encoding/hex"
"errors"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
+ aclV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
- aclV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
- "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/stretchr/testify/require"
)
-const versionTreeID = "version"
-
type dummyNetmapSource struct {
netmap.Source
}
-type dummySubjectProvider struct {
- subjects map[util.Uint160]client.SubjectExtended
-}
-
-func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
- res := s.subjects[addr]
- return &client.Subject{
- PrimaryKey: res.PrimaryKey,
- AdditionalKeys: res.AdditionalKeys,
- Namespace: res.Namespace,
- Name: res.Name,
- KV: res.KV,
- }, nil
-}
-
-func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
- res := s.subjects[addr]
- return &res, nil
-}
-
-type dummyEpochSource struct {
- epoch uint64
-}
-
-func (s dummyEpochSource) CurrentEpoch() uint64 {
- return s.epoch
-}
-
type dummyContainerSource map[string]*containercore.Container
-func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) {
+func (s dummyContainerSource) List() ([]cid.ID, error) {
res := make([]cid.ID, 0, len(s))
var cnr cid.ID
@@ -83,7 +44,7 @@ func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) {
return res, nil
}
-func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercore.Container, error) {
+func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) {
cnt, ok := s[id.String()]
if !ok {
return nil, errors.New("container not found")
@@ -91,10 +52,20 @@ func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercor
return cnt, nil
}
-func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) {
+func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, error) {
return &containercore.DelInfo{}, nil
}
+type dummyEACLSource map[string]*containercore.EACL
+
+func (s dummyEACLSource) GetEACL(id cid.ID) (*containercore.EACL, error) {
+ cntEACL, ok := s[id.String()]
+ if !ok {
+ return nil, errors.New("container not found")
+ }
+ return cntEACL, nil
+}
+
func testContainer(owner user.ID) container.Container {
var r netmapSDK.ReplicaDescriptor
r.SetNumberOfObjects(1)
@@ -109,8 +80,6 @@ func testContainer(owner user.ID) container.Container {
return cnt
}
-const currentEpoch = 123
-
func TestMessageSign(t *testing.T) {
privs := make([]*keys.PrivateKey, 4)
for i := range privs {
@@ -129,30 +98,22 @@ func TestMessageSign(t *testing.T) {
Value: testContainer(ownerID),
}
- e := inmemory.NewInMemoryLocalOverrides()
- e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.Target{
- Type: engine.Container,
- Name: cid1.EncodeToString(),
- }, testChain(privs[0].PublicKey(), privs[1].PublicKey()))
- frostfsidProvider := dummySubjectProvider{
- subjects: make(map[util.Uint160]client.SubjectExtended),
- }
-
s := &Service{
cfg: cfg{
- log: test.NewLogger(t),
+ log: test.NewLogger(t, true),
key: &privs[0].PrivateKey,
nmSource: dummyNetmapSource{},
cnrSource: dummyContainerSource{
cid1.String(): cnr,
},
- frostfsidSubjectProvider: frostfsidProvider,
- state: dummyEpochSource{epoch: currentEpoch},
+ eaclSource: dummyEACLSource{
+ cid1.String(): &containercore.EACL{
+ Value: testTable(cid1, privs[0].PublicKey(), privs[1].PublicKey()),
+ },
+ },
},
- apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}),
}
- s.cfg.authorizedKeys.Store(&[][]byte{})
rawCID1 := make([]byte, sha256.Size)
cid1.Encode(rawCID1)
@@ -161,7 +122,7 @@ func TestMessageSign(t *testing.T) {
ContainerId: rawCID1,
ParentId: 1,
NodeId: 2,
- Meta: []KeyValue{
+ Meta: []*KeyValue{
{Key: "kkk", Value: []byte("vvv")},
},
},
@@ -171,26 +132,26 @@ func TestMessageSign(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRW)
t.Run("missing signature, no panic", func(t *testing.T) {
- require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(req, cid2, nil, op))
})
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
+ require.NoError(t, s.verifyClient(req, cid1, nil, op))
t.Run("invalid CID", func(t *testing.T) {
- require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(req, cid2, nil, op))
})
cnr.Value.SetBasicACL(acl.Private)
t.Run("extension disabled", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(req, cid2, nil, op))
})
t.Run("invalid key", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op))
+ require.Error(t, s.verifyClient(req, cid1, nil, op))
})
t.Run("bearer", func(t *testing.T) {
@@ -203,7 +164,7 @@ func TestMessageSign(t *testing.T) {
t.Run("invalid bearer", func(t *testing.T) {
req.Body.BearerToken = []byte{0xFF}
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer CID", func(t *testing.T) {
@@ -212,7 +173,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer owner", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -220,7 +181,7 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("invalid bearer signature", func(t *testing.T) {
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -232,88 +193,20 @@ func TestMessageSign(t *testing.T) {
req.Body.BearerToken = bv2.StableMarshal(nil)
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- })
-
- t.Run("omit override within bt", func(t *testing.T) {
- t.Run("personated", func(t *testing.T) {
- bt := testBearerTokenNoOverride()
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override")
- })
-
- t.Run("impersonated", func(t *testing.T) {
- bt := testBearerTokenNoOverride()
- bt.SetImpersonate(true)
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- })
- })
-
- t.Run("invalid override within bearer token", func(t *testing.T) {
- t.Run("personated", func(t *testing.T) {
- bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
- })
-
- t.Run("impersonated", func(t *testing.T) {
- bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey())
- bt.SetImpersonate(true)
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
-
- require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid")
- })
+ require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
})
t.Run("impersonate", func(t *testing.T) {
cnr.Value.SetBasicACL(acl.PublicRWExtended)
var bt bearer.Token
- bt.SetExp(10)
bt.SetImpersonate(true)
- bt.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- Name: cid1.EncodeToString(),
- },
- Chains: []ape.Chain{},
- })
- require.NoError(t, bt.Sign(privs[0].PrivateKey))
- req.Body.BearerToken = bt.Marshal()
- require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
- })
-
- t.Run("impersonate but invalid signer", func(t *testing.T) {
- var bt bearer.Token
- bt.SetExp(10)
- bt.SetImpersonate(true)
- bt.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- Name: cid1.EncodeToString(),
- },
- Chains: []ape.Chain{},
- })
require.NoError(t, bt.Sign(privs[1].PrivateKey))
req.Body.BearerToken = bt.Marshal()
require.NoError(t, SignMessage(req, &privs[0].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey())
@@ -323,95 +216,64 @@ func TestMessageSign(t *testing.T) {
t.Run("put and get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[1].PrivateKey))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.NoError(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("only get", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[2].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.NoError(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
t.Run("none", func(t *testing.T) {
require.NoError(t, SignMessage(req, &privs[3].PrivateKey))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut))
- require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet))
+ require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut))
+ require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet))
})
})
}
func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token {
var b bearer.Token
- b.SetExp(currentEpoch + 1)
- b.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- Name: cid.EncodeToString(),
- },
- Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
- })
+ b.SetEACLTable(*testTable(cid, forPutGet, forGet))
return b
}
-func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token {
- var b bearer.Token
- b.SetExp(currentEpoch + 1)
- b.SetAPEOverride(bearer.APEOverride{
- Target: ape.ChainTarget{
- TargetType: ape.TargetTypeContainer,
- },
- Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
- })
+func testTable(cid cid.ID, forPutGet, forGet *keys.PublicKey) *eaclSDK.Table {
+ tgtGet := eaclSDK.NewTarget()
+ tgtGet.SetRole(eaclSDK.RoleUnknown)
+ tgtGet.SetBinaryKeys([][]byte{forPutGet.Bytes(), forGet.Bytes()})
- return b
-}
+ rGet := eaclSDK.NewRecord()
+ rGet.SetAction(eaclSDK.ActionAllow)
+ rGet.SetOperation(eaclSDK.OperationGet)
+ rGet.SetTargets(*tgtGet)
-func testBearerTokenNoOverride() bearer.Token {
- var b bearer.Token
- b.SetExp(currentEpoch + 1)
- return b
-}
+ tgtPut := eaclSDK.NewTarget()
+ tgtPut.SetRole(eaclSDK.RoleUnknown)
+ tgtPut.SetBinaryKeys([][]byte{forPutGet.Bytes()})
-func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain {
- ruleGet := chain.Rule{
- Status: chain.Allow,
- Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
- Actions: chain.Actions{Names: []string{native.MethodGetObject}},
- Any: true,
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindRequest,
- Key: native.PropertyKeyActorPublicKey,
- Value: hex.EncodeToString(forPutGet.Bytes()),
- },
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindRequest,
- Key: native.PropertyKeyActorPublicKey,
- Value: hex.EncodeToString(forGet.Bytes()),
- },
- },
- }
- rulePut := chain.Rule{
- Status: chain.Allow,
- Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
- Actions: chain.Actions{Names: []string{native.MethodPutObject}},
- Any: true,
- Condition: []chain.Condition{
- {
- Op: chain.CondStringEquals,
- Kind: chain.KindRequest,
- Key: native.PropertyKeyActorPublicKey,
- Value: hex.EncodeToString(forPutGet.Bytes()),
- },
- },
+ rPut := eaclSDK.NewRecord()
+ rPut.SetAction(eaclSDK.ActionAllow)
+ rPut.SetOperation(eaclSDK.OperationPut)
+ rPut.SetTargets(*tgtPut)
+
+ tb := eaclSDK.NewTable()
+ tb.AddRecord(rGet)
+ tb.AddRecord(rPut)
+
+ tgt := eaclSDK.NewTarget()
+ tgt.SetRole(eaclSDK.RoleOthers)
+
+ for _, op := range []eaclSDK.Operation{eaclSDK.OperationGet, eaclSDK.OperationPut} {
+ r := eaclSDK.NewRecord()
+ r.SetAction(eaclSDK.ActionDeny)
+ r.SetTargets(*tgt)
+ r.SetOperation(op)
+ tb.AddRecord(r)
}
- return &chain.Chain{
- Rules: []chain.Rule{
- ruleGet,
- rulePut,
- },
- }
+ tb.SetCID(cid)
+
+ return tb
}
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index af355639f..9cff8b351 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -2,21 +2,16 @@ package tree
import (
"context"
- "crypto/ecdsa"
"crypto/sha256"
- "crypto/tls"
"errors"
"fmt"
"io"
"math"
"math/rand"
"sync"
- "sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -24,15 +19,12 @@ import (
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@@ -46,7 +38,7 @@ const defaultSyncWorkerCount = 20
// tree IDs from the other container nodes. Returns ErrNotInContainer if the node
// is not included in the container.
func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
- nodes, pos, err := s.getContainerNodes(ctx, cid)
+ nodes, pos, err := s.getContainerNodes(cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -78,8 +70,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
var treesToSync []string
var outErr error
- err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool {
- resp, outErr = c.TreeList(fCtx, req)
+ err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool {
+ resp, outErr = c.TreeList(ctx, req)
if outErr != nil {
return false
}
@@ -99,7 +91,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
for _, tid := range treesToSync {
h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
- s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree,
+ s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
continue
@@ -107,7 +99,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
if h < newHeight {
if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil {
- s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
+ s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
}
@@ -119,7 +111,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
// SynchronizeTree tries to synchronize log starting from the last stored height.
func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error {
- nodes, pos, err := s.getContainerNodes(ctx, cid)
+ nodes, pos, err := s.getContainerNodes(cid)
if err != nil {
return fmt.Errorf("can't get container nodes: %w", err)
}
@@ -138,9 +130,14 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string
}
// mergeOperationStreams performs merge sort for node operation streams to one stream.
-func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
+func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
defer close(merged)
+ ms := make([]*pilorama.Move, len(streams))
+ for i := range streams {
+ ms[i] = <-streams[i]
+ }
+
// Merging different node streams shuffles incoming operations like that:
//
// x - operation from the stream A
@@ -152,15 +149,6 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m
// operation height from the stream B. This height is stored in minStreamedLastHeight.
var minStreamedLastHeight uint64 = math.MaxUint64
- ms := make([]*pilorama.Move, len(streams))
- for i := range streams {
- select {
- case ms[i] = <-streams[i]:
- case <-ctx.Done():
- return minStreamedLastHeight
- }
- }
-
for {
var minTimeMoveTime uint64 = math.MaxUint64
minTimeMoveIndex := -1
@@ -175,14 +163,12 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m
break
}
- select {
- case merged <- ms[minTimeMoveIndex]:
- case <-ctx.Done():
- return minStreamedLastHeight
- }
+ merged <- ms[minTimeMoveIndex]
height := ms[minTimeMoveIndex].Time
if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil {
- minStreamedLastHeight = min(minStreamedLastHeight, height)
+ if minStreamedLastHeight > height {
+ minStreamedLastHeight = height
+ }
}
}
@@ -190,76 +176,83 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m
}
func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string,
- operationStream <-chan *pilorama.Move,
-) (uint64, error) {
+ operationStream <-chan *pilorama.Move) uint64 {
+ errGroup, _ := errgroup.WithContext(ctx)
+ const workersCount = 1024
+ errGroup.SetLimit(workersCount)
+
+ // We run TreeApply concurrently for the operation batch. Let's consider two operations
+ // in the batch m1 and m2 such that m1.Time < m2.Time. The engine may apply m2 and fail
+ // on m1. That means the service must start sync from m1.Time in the next iteration and
+ // this height is stored in unappliedOperationHeight.
+ var unappliedOperationHeight uint64 = math.MaxUint64
+ var heightMtx sync.Mutex
+
var prev *pilorama.Move
- var batch []*pilorama.Move
for m := range operationStream {
+ m := m
+
// skip already applied op
if prev != nil && prev.Time == m.Time {
continue
}
prev = m
- batch = append(batch, m)
- if len(batch) == s.syncBatchSize {
- if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
- return batch[0].Time, err
+ errGroup.Go(func() error {
+ if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil {
+ heightMtx.Lock()
+ if m.Time < unappliedOperationHeight {
+ unappliedOperationHeight = m.Time
+ }
+ heightMtx.Unlock()
+ return err
}
- batch = batch[:0]
- }
+ return nil
+ })
}
- if len(batch) > 0 {
- if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
- return batch[0].Time, err
- }
- }
- return math.MaxUint64, nil
+ _ = errGroup.Wait()
+ return unappliedOperationHeight
}
func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
- height uint64, cc *grpc.ClientConn, opsCh chan<- *pilorama.Move,
-) error {
- treeClient := NewTreeServiceClient(cc)
-
+ height uint64, treeClient TreeServiceClient, opsCh chan<- *pilorama.Move) (uint64, error) {
rawCID := make([]byte, sha256.Size)
cid.Encode(rawCID)
- req := &GetOpLogRequest{
- Body: &GetOpLogRequest_Body{
- ContainerId: rawCID,
- TreeId: treeID,
- Height: height,
- },
- }
- if err := SignMessage(req, s.key); err != nil {
- return err
- }
+ for {
+ newHeight := height
+ req := &GetOpLogRequest{
+ Body: &GetOpLogRequest_Body{
+ ContainerId: rawCID,
+ TreeId: treeID,
+ Height: newHeight,
+ },
+ }
+ if err := SignMessage(req, s.key); err != nil {
+ return 0, err
+ }
- c, err := treeClient.GetOpLog(ctx, req)
- if err != nil {
- return fmt.Errorf("can't initialize client: %w", err)
- }
- res, err := c.Recv()
- for ; err == nil; res, err = c.Recv() {
- lm := res.GetBody().GetOperation()
- m := &pilorama.Move{
- Parent: lm.GetParentId(),
- Child: lm.GetChildId(),
+ c, err := treeClient.GetOpLog(ctx, req)
+ if err != nil {
+ return 0, fmt.Errorf("can't initialize client: %w", err)
}
- if err := m.FromBytes(lm.GetMeta()); err != nil {
- return err
+ res, err := c.Recv()
+ for ; err == nil; res, err = c.Recv() {
+ lm := res.GetBody().GetOperation()
+ m := &pilorama.Move{
+ Parent: lm.ParentId,
+ Child: lm.ChildId,
+ }
+ if err := m.Meta.FromBytes(lm.Meta); err != nil {
+ return 0, err
+ }
+ opsCh <- m
}
- select {
- case opsCh <- m:
- case <-ctx.Done():
- return ctx.Err()
+ if height == newHeight || err != nil && !errors.Is(err, io.EOF) {
+ return newHeight, err
}
+ height = newHeight
}
- if !errors.Is(err, io.EOF) {
- return err
- }
- return nil
}
// synchronizeTree synchronizes operations getting them from different nodes.
@@ -268,9 +261,11 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
// already applied operation and keep good batching.
// The method returns a height that service should start sync from in the next time.
func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
- treeID string, nodes []netmapSDK.NodeInfo,
-) uint64 {
- s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
+ treeID string, nodes []netmapSDK.NodeInfo) uint64 {
+ s.log.Debug(logs.TreeSynchronizeTree,
+ zap.Stringer("cid", cid),
+ zap.String("tree", treeID),
+ zap.Uint64("from", from))
errGroup, egCtx := errgroup.WithContext(ctx)
const workersCount = 1024
@@ -283,53 +278,60 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
merged := make(chan *pilorama.Move)
var minStreamedLastHeight uint64
errGroup.Go(func() error {
- minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged)
+ minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged)
return nil
})
var minUnappliedHeight uint64
errGroup.Go(func() error {
- var err error
- minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged)
- return err
+ minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged)
+ return nil
})
- var allNodesSynced atomic.Bool
- allNodesSynced.Store(true)
-
for i, n := range nodes {
+ i := i
+ n := n
errGroup.Go(func() error {
- var nodeSynced bool
- for addr := range n.NetworkEndpoints() {
+ height := from
+ n.IterateNetworkEndpoints(func(addr string) bool {
var a network.Address
if err := a.FromString(addr); err != nil {
- s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
- continue
+ return false
}
- cc, err := dialTreeService(ctx, a, s.key, s.ds)
+ cc, err := grpc.DialContext(egCtx, a.URIAddr(),
+ grpc.WithChainUnaryInterceptor(
+ metrics.NewUnaryClientInterceptor(),
+ tracing_grpc.NewUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ metrics.NewStreamClientInterceptor(),
+ tracing_grpc.NewStreamClientInterceptor(),
+ ),
+ grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
- s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
- continue
+ // Failed to connect, try the next address.
+ return false
}
+ defer cc.Close()
- err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i])
- if err != nil {
- s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
+ treeClient := NewTreeServiceClient(cc)
+ for {
+ h, err := s.startStream(egCtx, cid, treeID, from, treeClient, nodeOperationStreams[i])
+ if height < h {
+ height = h
+ }
+ if err != nil || h <= height {
+ // Error with the response, try the next node.
+ return true
+ }
}
- nodeSynced = err == nil
- _ = cc.Close()
- break
- }
+ })
close(nodeOperationStreams[i])
- if !nodeSynced {
- allNodesSynced.Store(false)
- }
return nil
})
}
if err := errGroup.Wait(); err != nil {
- allNodesSynced.Store(false)
- s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
+ s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
}
newHeight := minStreamedLastHeight
@@ -338,66 +340,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
} else {
newHeight++
}
- if allNodesSynced.Load() {
- return newHeight
- }
- return from
-}
-
-func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) {
- cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer()))
- if err != nil {
- return nil, err
- }
-
- ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
- defer cancel()
-
- req := &HealthcheckRequest{
- Body: &HealthcheckRequest_Body{},
- }
- if err := SignMessage(req, key); err != nil {
- return nil, err
- }
-
- // perform some request to check connection
- if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
- _ = cc.Close()
- return nil, err
- }
- return cc, nil
-}
-
-func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
- host, isTLS, err := client.ParseURI(a.URIAddr())
- if err != nil {
- return nil, err
- }
-
- creds := insecure.NewCredentials()
- if isTLS {
- creds = credentials.NewTLS(&tls.Config{})
- }
-
- defaultOpts := []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(
- qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
- metrics.NewUnaryClientInterceptor(),
- tracing_grpc.NewUnaryClientInterceptor(),
- tagging.NewUnaryClientInterceptor(),
- ),
- grpc.WithChainStreamInterceptor(
- qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
- metrics.NewStreamClientInterceptor(),
- tracing_grpc.NewStreamClientInterceptor(),
- tagging.NewStreamClientInterceptor(),
- ),
- grpc.WithTransportCredentials(creds),
- grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
- grpc.WithDisableServiceConfig(),
- }
-
- return grpc.NewClient(host, append(defaultOpts, opts...)...)
+ return newHeight
}
// ErrAlreadySyncing is returned when a service synchronization has already
@@ -437,25 +380,25 @@ func (s *Service) syncLoop(ctx context.Context) {
return
case <-s.syncChan:
ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync")
- s.log.Info(ctx, logs.TreeSyncingTrees)
+ s.log.Debug(logs.TreeSyncingTrees)
start := time.Now()
- cnrs, err := s.cnrSource.List(ctx)
+ cnrs, err := s.cfg.cnrSource.List()
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err))
+ s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err))
s.metrics.AddSyncDuration(time.Since(start), false)
span.End()
break
}
- newMap, cnrsToSync := s.containersToSync(ctx, cnrs)
+ newMap, cnrsToSync := s.containersToSync(cnrs)
s.syncContainers(ctx, cnrsToSync)
s.removeContainers(ctx, newMap)
- s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized)
+ s.log.Debug(logs.TreeTreesHaveBeenSynchronized)
s.metrics.AddSyncDuration(time.Since(start), true)
span.End()
@@ -472,22 +415,22 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
var wg sync.WaitGroup
for _, cnr := range cnrs {
wg.Add(1)
-
+ cnr := cnr
err := s.syncPool.Submit(func() {
defer wg.Done()
- s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
err := s.synchronizeAllTrees(ctx, cnr)
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
+ s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
return
}
- s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
})
if err != nil {
wg.Done()
- s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization,
+ s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization,
zap.Stringer("cid", cnr),
zap.Error(err))
if errors.Is(err, ants.ErrPoolClosed) {
@@ -511,9 +454,9 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
continue
}
- existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr)
+ existed, err := containerCore.WasRemoved(s.cnrSource, cnr)
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted,
+ s.log.Error(logs.TreeCouldNotCheckIfContainerExisted,
zap.Stringer("cid", cnr),
zap.Error(err))
} else if existed {
@@ -525,25 +468,25 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
}
for _, cnr := range removed {
- s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
err := s.DropTree(ctx, cnr, "")
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree,
+ s.log.Error(logs.TreeCouldNotRemoveRedundantTree,
zap.Stringer("cid", cnr),
zap.Error(err))
}
}
}
-func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
+func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
newMap := make(map[cid.ID]struct{}, len(s.cnrMap))
cnrsToSync := make([]cid.ID, 0, len(cnrs))
for _, cnr := range cnrs {
- _, pos, err := s.getContainerNodes(ctx, cnr)
+ _, pos, err := s.getContainerNodes(cnr)
if err != nil {
- s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes,
+ s.log.Error(logs.TreeCouldNotCalculateContainerNodes,
zap.Stringer("cid", cnr),
zap.Error(err))
continue
diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go
index 87d419408..190b4ccbb 100644
--- a/pkg/services/tree/sync_test.go
+++ b/pkg/services/tree/sync_test.go
@@ -1,7 +1,6 @@
package tree
import (
- "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -52,6 +51,8 @@ func Test_mergeOperationStreams(t *testing.T) {
// generate and put values to all chans
for i, ch := range nodeOpChans {
+ i := i
+ ch := ch
go func() {
for _, tm := range tt.opTimes[i] {
op := &pilorama.Move{}
@@ -65,7 +66,7 @@ func Test_mergeOperationStreams(t *testing.T) {
merged := make(chan *pilorama.Move, 1)
min := make(chan uint64)
go func() {
- min <- mergeOperationStreams(context.Background(), nodeOpChans, merged)
+ min <- mergeOperationStreams(nodeOpChans, merged)
}()
var res []uint64
diff --git a/pkg/services/tree/types.pb.go b/pkg/services/tree/types.pb.go
new file mode 100644
index 000000000..b4d6981ef
--- /dev/null
+++ b/pkg/services/tree/types.pb.go
@@ -0,0 +1,320 @@
+//*
+// Auxiliary structures to use with tree service.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.9
+// source: pkg/services/tree/types.proto
+
+package tree
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// KeyValue represents key-value pair attached to an object.
+type KeyValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Attribute name.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // Attribute value.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *KeyValue) Reset() {
+ *x = KeyValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_types_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeyValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValue) ProtoMessage() {}
+
+func (x *KeyValue) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_types_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead.
+func (*KeyValue) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *KeyValue) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *KeyValue) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+// LogMove represents log-entry for a single move operation.
+type LogMove struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // ID of the parent node.
+ ParentId uint64 `protobuf:"varint,1,opt,name=parent_id,json=parentID,proto3" json:"parent_id,omitempty"`
+ // Node meta information, including operation timestamp.
+ Meta []byte `protobuf:"bytes,2,opt,name=meta,proto3" json:"meta,omitempty"`
+ // ID of the node to move.
+ ChildId uint64 `protobuf:"varint,3,opt,name=child_id,json=childID,proto3" json:"child_id,omitempty"`
+}
+
+func (x *LogMove) Reset() {
+ *x = LogMove{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_types_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LogMove) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LogMove) ProtoMessage() {}
+
+func (x *LogMove) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_types_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LogMove.ProtoReflect.Descriptor instead.
+func (*LogMove) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *LogMove) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+
+func (x *LogMove) GetMeta() []byte {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+
+func (x *LogMove) GetChildId() uint64 {
+ if x != nil {
+ return x.ChildId
+ }
+ return 0
+}
+
+// Signature of a message.
+type Signature struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Serialized public key as defined in FrostFS API.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // Signature of a message body.
+ Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"`
+}
+
+func (x *Signature) Reset() {
+ *x = Signature{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_tree_types_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Signature) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Signature) ProtoMessage() {}
+
+func (x *Signature) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_tree_types_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
+func (*Signature) Descriptor() ([]byte, []int) {
+ return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Signature) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *Signature) GetSign() []byte {
+ if x != nil {
+ return x.Sign
+ }
+ return nil
+}
+
+var File_pkg_services_tree_types_proto protoreflect.FileDescriptor
+
+var file_pkg_services_tree_types_proto_rawDesc = []byte{
+ 0x0a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74,
+ 0x72, 0x65, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x07, 0x4c, 0x6f, 0x67,
+ 0x4d, 0x6f, 0x76, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49,
+ 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69,
+ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x49, 0x44,
+ 0x22, 0x36, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x2e,
+ 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75,
+ 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66,
+ 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_pkg_services_tree_types_proto_rawDescOnce sync.Once
+ file_pkg_services_tree_types_proto_rawDescData = file_pkg_services_tree_types_proto_rawDesc
+)
+
+func file_pkg_services_tree_types_proto_rawDescGZIP() []byte {
+ file_pkg_services_tree_types_proto_rawDescOnce.Do(func() {
+ file_pkg_services_tree_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_tree_types_proto_rawDescData)
+ })
+ return file_pkg_services_tree_types_proto_rawDescData
+}
+
+var file_pkg_services_tree_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_pkg_services_tree_types_proto_goTypes = []interface{}{
+ (*KeyValue)(nil), // 0: tree.KeyValue
+ (*LogMove)(nil), // 1: tree.LogMove
+ (*Signature)(nil), // 2: tree.Signature
+}
+var file_pkg_services_tree_types_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_pkg_services_tree_types_proto_init() }
+func file_pkg_services_tree_types_proto_init() {
+ if File_pkg_services_tree_types_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pkg_services_tree_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeyValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LogMove); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_tree_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Signature); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pkg_services_tree_types_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pkg_services_tree_types_proto_goTypes,
+ DependencyIndexes: file_pkg_services_tree_types_proto_depIdxs,
+ MessageInfos: file_pkg_services_tree_types_proto_msgTypes,
+ }.Build()
+ File_pkg_services_tree_types_proto = out.File
+ file_pkg_services_tree_types_proto_rawDesc = nil
+ file_pkg_services_tree_types_proto_goTypes = nil
+ file_pkg_services_tree_types_proto_depIdxs = nil
+}
diff --git a/pkg/services/tree/types.proto b/pkg/services/tree/types.proto
index f122c7cf4..23d73b9ad 100644
--- a/pkg/services/tree/types.proto
+++ b/pkg/services/tree/types.proto
@@ -10,25 +10,25 @@ option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tre
// KeyValue represents key-value pair attached to an object.
message KeyValue {
// Attribute name.
- string key = 1 [ json_name = "key" ];
+ string key = 1 [json_name = "key"];
// Attribute value.
- bytes value = 2 [ json_name = "value" ];
+ bytes value = 2 [json_name = "value"];
}
// LogMove represents log-entry for a single move operation.
message LogMove {
// ID of the parent node.
- uint64 parent_id = 1 [ json_name = "parentID" ];
+ uint64 parent_id = 1 [json_name = "parentID"];
// Node meta information, including operation timestamp.
- bytes meta = 2 [ json_name = "meta" ];
+ bytes meta = 2 [json_name = "meta"];
// ID of the node to move.
- uint64 child_id = 3 [ json_name = "childID" ];
+ uint64 child_id = 3 [json_name = "childID"];
}
// Signature of a message.
message Signature {
// Serialized public key as defined in FrostFS API.
- bytes key = 1 [ json_name = "key" ];
+ bytes key = 1 [json_name = "key"];
// Signature of a message body.
- bytes sign = 2 [ json_name = "signature" ];
+ bytes sign = 2 [json_name = "signature"];
}
diff --git a/pkg/services/tree/types_frostfs.pb.go b/pkg/services/tree/types_frostfs.pb.go
index 2827b10a9..707fcc3cc 100644
--- a/pkg/services/tree/types_frostfs.pb.go
+++ b/pkg/services/tree/types_frostfs.pb.go
@@ -2,29 +2,7 @@
package tree
-import (
- json "encoding/json"
- fmt "fmt"
- pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool"
- proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
- encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding"
- easyproto "github.com/VictoriaMetrics/easyproto"
- jlexer "github.com/mailru/easyjson/jlexer"
- jwriter "github.com/mailru/easyjson/jwriter"
- strconv "strconv"
-)
-
-type KeyValue struct {
- Key string `json:"key"`
- Value []byte `json:"value"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*KeyValue)(nil)
- _ encoding.ProtoUnmarshaler = (*KeyValue)(nil)
- _ json.Marshaler = (*KeyValue)(nil)
- _ json.Unmarshaler = (*KeyValue)(nil)
-)
+import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
// StableSize returns the size of x in protobuf format.
//
@@ -38,176 +16,27 @@ func (x *KeyValue) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *KeyValue) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *KeyValue) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *KeyValue) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.Key) != 0 {
- mm.AppendString(1, x.Key)
- }
- if len(x.Value) != 0 {
- mm.AppendBytes(2, x.Value)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.StringMarshal(1, buf[offset:], x.Key)
+ offset += proto.BytesMarshal(2, buf[offset:], x.Value)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *KeyValue) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "KeyValue")
- }
- switch fc.FieldNum {
- case 1: // Key
- data, ok := fc.String()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Key")
- }
- x.Key = data
- case 2: // Value
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Value")
- }
- x.Value = data
- }
- }
- return nil
-}
-func (x *KeyValue) GetKey() string {
- if x != nil {
- return x.Key
- }
- return ""
-}
-func (x *KeyValue) SetKey(v string) {
- x.Key = v
-}
-func (x *KeyValue) GetValue() []byte {
- if x != nil {
- return x.Value
- }
- return nil
-}
-func (x *KeyValue) SetValue(v []byte) {
- x.Value = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *KeyValue) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *KeyValue) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
- out.String(x.Key)
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"value\":"
- out.RawString(prefix)
- if x.Value != nil {
- out.Base64Bytes(x.Value)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *KeyValue) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *KeyValue) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "key":
- {
- var f string
- f = in.String()
- x.Key = f
- }
- case "value":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Value = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type LogMove struct {
- ParentId uint64 `json:"parentID"`
- Meta []byte `json:"meta"`
- ChildId uint64 `json:"childID"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*LogMove)(nil)
- _ encoding.ProtoUnmarshaler = (*LogMove)(nil)
- _ json.Marshaler = (*LogMove)(nil)
- _ json.Unmarshaler = (*LogMove)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -221,229 +50,28 @@ func (x *LogMove) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *LogMove) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *LogMove) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *LogMove) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if x.ParentId != 0 {
- mm.AppendUint64(1, x.ParentId)
- }
- if len(x.Meta) != 0 {
- mm.AppendBytes(2, x.Meta)
- }
- if x.ChildId != 0 {
- mm.AppendUint64(3, x.ChildId)
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.UInt64Marshal(1, buf[offset:], x.ParentId)
+ offset += proto.BytesMarshal(2, buf[offset:], x.Meta)
+ offset += proto.UInt64Marshal(3, buf[offset:], x.ChildId)
+ return buf
}
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *LogMove) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "LogMove")
- }
- switch fc.FieldNum {
- case 1: // ParentId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ParentId")
- }
- x.ParentId = data
- case 2: // Meta
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Meta")
- }
- x.Meta = data
- case 3: // ChildId
- data, ok := fc.Uint64()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "ChildId")
- }
- x.ChildId = data
- }
- }
- return nil
-}
-func (x *LogMove) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-func (x *LogMove) SetParentId(v uint64) {
- x.ParentId = v
-}
-func (x *LogMove) GetMeta() []byte {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-func (x *LogMove) SetMeta(v []byte) {
- x.Meta = v
-}
-func (x *LogMove) GetChildId() uint64 {
- if x != nil {
- return x.ChildId
- }
- return 0
-}
-func (x *LogMove) SetChildId(v uint64) {
- x.ChildId = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *LogMove) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *LogMove) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"parentID\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10)
- out.RawByte('"')
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"meta\":"
- out.RawString(prefix)
- if x.Meta != nil {
- out.Base64Bytes(x.Meta)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"childID\":"
- out.RawString(prefix)
- out.RawByte('"')
- out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ChildId, 10)
- out.RawByte('"')
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *LogMove) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *LogMove) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "parentID":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.ParentId = f
- }
- case "meta":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Meta = f
- }
- case "childID":
- {
- var f uint64
- r := in.JsonNumber()
- n := r.String()
- v, err := strconv.ParseUint(n, 10, 64)
- if err != nil {
- in.AddError(err)
- return
- }
- pv := uint64(v)
- f = pv
- x.ChildId = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
- }
-}
-
-type Signature struct {
- Key []byte `json:"key"`
- Sign []byte `json:"signature"`
-}
-
-var (
- _ encoding.ProtoMarshaler = (*Signature)(nil)
- _ encoding.ProtoUnmarshaler = (*Signature)(nil)
- _ json.Marshaler = (*Signature)(nil)
- _ json.Unmarshaler = (*Signature)(nil)
-)
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -456,169 +84,23 @@ func (x *Signature) StableSize() (size int) {
return size
}
-// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
-func (x *Signature) MarshalProtobuf(dst []byte) []byte {
- m := pool.MarshalerPool.Get()
- defer pool.MarshalerPool.Put(m)
- x.EmitProtobuf(m.MessageMarshaler())
- dst = m.Marshal(dst)
- return dst
-}
-
-func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *Signature) StableMarshal(buf []byte) []byte {
if x == nil {
- return
+ return []byte{}
}
- if len(x.Key) != 0 {
- mm.AppendBytes(1, x.Key)
- }
- if len(x.Sign) != 0 {
- mm.AppendBytes(2, x.Sign)
- }
-}
-
-// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
-func (x *Signature) UnmarshalProtobuf(src []byte) (err error) {
- var fc easyproto.FieldContext
- for len(src) > 0 {
- src, err = fc.NextField(src)
- if err != nil {
- return fmt.Errorf("cannot read next field in %s", "Signature")
- }
- switch fc.FieldNum {
- case 1: // Key
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Key")
- }
- x.Key = data
- case 2: // Sign
- data, ok := fc.Bytes()
- if !ok {
- return fmt.Errorf("cannot unmarshal field %s", "Sign")
- }
- x.Sign = data
- }
- }
- return nil
-}
-func (x *Signature) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-func (x *Signature) SetKey(v []byte) {
- x.Key = v
-}
-func (x *Signature) GetSign() []byte {
- if x != nil {
- return x.Sign
- }
- return nil
-}
-func (x *Signature) SetSign(v []byte) {
- x.Sign = v
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *Signature) MarshalJSON() ([]byte, error) {
- w := jwriter.Writer{}
- x.MarshalEasyJSON(&w)
- return w.Buffer.BuildBytes(), w.Error
-}
-func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
- if x == nil {
- out.RawString("null")
- return
- }
- first := true
- out.RawByte('{')
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"key\":"
- out.RawString(prefix)
- if x.Key != nil {
- out.Base64Bytes(x.Key)
- } else {
- out.String("")
- }
- }
- {
- if !first {
- out.RawByte(',')
- } else {
- first = false
- }
- const prefix string = "\"signature\":"
- out.RawString(prefix)
- if x.Sign != nil {
- out.Base64Bytes(x.Sign)
- } else {
- out.String("")
- }
- }
- out.RawByte('}')
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (x *Signature) UnmarshalJSON(data []byte) error {
- r := jlexer.Lexer{Data: data}
- x.UnmarshalEasyJSON(&r)
- return r.Error()
-}
-func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
- isTopLevel := in.IsStart()
- if in.IsNull() {
- if isTopLevel {
- in.Consumed()
- }
- in.Skip()
- return
- }
- in.Delim('{')
- for !in.IsDelim('}') {
- key := in.UnsafeFieldName(false)
- in.WantColon()
- if in.IsNull() {
- in.Skip()
- in.WantComma()
- continue
- }
- switch key {
- case "key":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Key = f
- }
- case "signature":
- {
- var f []byte
- {
- tmp := in.Bytes()
- if len(tmp) == 0 {
- tmp = nil
- }
- f = tmp
- }
- x.Sign = f
- }
- }
- in.WantComma()
- }
- in.Delim('}')
- if isTopLevel {
- in.Consumed()
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
}
+ var offset int
+ offset += proto.BytesMarshal(1, buf[offset:], x.Key)
+ offset += proto.BytesMarshal(2, buf[offset:], x.Sign)
+ return buf
}
diff --git a/pkg/services/util/response/service.go b/pkg/services/util/response/service.go
index 5152a8ece..005a643e5 100644
--- a/pkg/services/util/response/service.go
+++ b/pkg/services/util/response/service.go
@@ -1,10 +1,10 @@
package response
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)
diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go
index 348a45a94..bce43d6e8 100644
--- a/pkg/services/util/sign.go
+++ b/pkg/services/util/sign.go
@@ -5,8 +5,8 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
diff --git a/pkg/util/ape/converter.go b/pkg/util/ape/converter.go
deleted file mode 100644
index c706cf052..000000000
--- a/pkg/util/ape/converter.go
+++ /dev/null
@@ -1,280 +0,0 @@
-package ape
-
-import (
- "encoding/hex"
- "fmt"
-
- v2acl "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
-)
-
-type ConvertEACLError struct {
- nested error
-}
-
-func (e *ConvertEACLError) Error() string {
- if e == nil {
- return ""
- }
- return "failed to convert eACL table to policy engine chain: " + e.nested.Error()
-}
-
-func (e *ConvertEACLError) Unwrap() error {
- if e == nil {
- return nil
- }
- return e.nested
-}
-
-// ConvertEACLToAPE converts eacl.Table to apechain.Chain.
-func ConvertEACLToAPE(eaclTable *eacl.Table) (*apechain.Chain, error) {
- if eaclTable == nil {
- return nil, nil
- }
- res := &apechain.Chain{
- MatchType: apechain.MatchTypeFirstMatch,
- }
-
- resource := getResource(eaclTable)
-
- for _, eaclRecord := range eaclTable.Records() {
- if len(eaclRecord.Targets()) == 0 {
- // see https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L101
- // and https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L36
- // such record doesn't have any effect
- continue
- }
-
- st, err := actionToStatus(eaclRecord.Action())
- if err != nil {
- return nil, err
- }
- act, err := operationToAction(eaclRecord.Operation())
- if err != nil {
- return nil, err
- }
-
- if len(eaclRecord.Filters()) == 0 {
- res.Rules = appendTargetsOnly(res.Rules, st, act, resource, eaclRecord.Targets())
- } else {
- res.Rules, err = appendTargetsAndFilters(res.Rules, st, act, resource, eaclRecord.Targets(), eaclRecord.Filters())
- if err != nil {
- return nil, err
- }
- }
- }
-
- return res, nil
-}
-
-func apeRoleConds(role eacl.Role) (res []apechain.Condition) {
- switch role {
- case eacl.RoleSystem:
- res = append(res,
- apechain.Condition{
- Op: apechain.CondStringEquals,
- Kind: apechain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleContainer,
- },
- )
- res = append(res,
- apechain.Condition{
- Op: apechain.CondStringEquals,
- Kind: apechain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleIR,
- },
- )
- case eacl.RoleOthers:
- res = append(res,
- apechain.Condition{
- Op: apechain.CondStringEquals,
- Kind: apechain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOthers,
- },
- )
- case eacl.RoleUser:
- res = append(res,
- apechain.Condition{
- Op: apechain.CondStringEquals,
- Kind: apechain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleOwner,
- },
- )
- case eacl.RoleUnknown:
- // such condition has no effect
- default:
- }
- return
-}
-
-func appendTargetsOnly(source []apechain.Rule, st apechain.Status, act apechain.Actions, res apechain.Resources, targets []eacl.Target) []apechain.Rule {
- // see https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L101
- // role OR public key must be equal
- rule := apechain.Rule{
- Status: st,
- Actions: act,
- Resources: res,
- Any: true,
- }
- for _, target := range targets {
- rule.Condition = append(rule.Condition, apeRoleConds(target.Role())...)
- for _, binKey := range target.BinaryKeys() {
- var pubKeyCondition apechain.Condition
- pubKeyCondition.Kind = apechain.KindRequest
- pubKeyCondition.Key = nativeschema.PropertyKeyActorPublicKey
- pubKeyCondition.Value = hex.EncodeToString(binKey)
- pubKeyCondition.Op = apechain.CondStringEquals
- rule.Condition = append(rule.Condition, pubKeyCondition)
- }
- }
- return append(source, rule)
-}
-
-func appendTargetsAndFilters(source []apechain.Rule, st apechain.Status, act apechain.Actions, res apechain.Resources,
- targets []eacl.Target, filters []eacl.Filter,
-) ([]apechain.Rule, error) {
- // see https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/ab75edd70939564421936d207ef80d6c1398b51b/eacl/validator.go#L101
- // role OR public key must be equal
- // so filters are repeated for each role and public key
- var err error
- for _, target := range targets {
- rule := apechain.Rule{
- Status: st,
- Actions: act,
- Resources: res,
- }
- rule.Condition = append(rule.Condition, apeRoleConds(target.Role())...)
- rule.Condition, err = appendFilters(rule.Condition, filters)
- if err != nil {
- return nil, err
- }
-
- source = append(source, rule)
-
- for _, binKey := range target.BinaryKeys() {
- rule := apechain.Rule{
- Status: st,
- Actions: act,
- Resources: res,
- }
- var pubKeyCondition apechain.Condition
- pubKeyCondition.Kind = apechain.KindRequest
- pubKeyCondition.Key = nativeschema.PropertyKeyActorPublicKey
- pubKeyCondition.Value = hex.EncodeToString(binKey)
- pubKeyCondition.Op = apechain.CondStringEquals
-
- rule.Condition = append(rule.Condition, pubKeyCondition)
- rule.Condition, err = appendFilters(rule.Condition, filters)
- if err != nil {
- return nil, err
- }
-
- source = append(source, rule)
- }
- }
-
- return source, nil
-}
-
-func appendFilters(source []apechain.Condition, filters []eacl.Filter) ([]apechain.Condition, error) {
- for _, filter := range filters {
- var cond apechain.Condition
- var isObject bool
- if filter.From() == eacl.HeaderFromObject {
- cond.Kind = apechain.KindResource
- isObject = true
- } else if filter.From() == eacl.HeaderFromRequest {
- cond.Kind = apechain.KindRequest
- } else {
- return nil, &ConvertEACLError{nested: fmt.Errorf("unknown filter from: %d", filter.From())}
- }
-
- if filter.Matcher() == eacl.MatchStringEqual {
- cond.Op = apechain.CondStringEquals
- } else if filter.Matcher() == eacl.MatchStringNotEqual {
- cond.Op = apechain.CondStringNotEquals
- } else {
- return nil, &ConvertEACLError{nested: fmt.Errorf("unknown filter matcher: %d", filter.Matcher())}
- }
-
- cond.Key = eaclKeyToAPEKey(filter.Key(), isObject)
- cond.Value = filter.Value()
-
- source = append(source, cond)
- }
- return source, nil
-}
-
-func eaclKeyToAPEKey(key string, isObject bool) string {
- if !isObject {
- return key
- }
- switch key {
- default:
- return key
- case v2acl.FilterObjectVersion:
- return nativeschema.PropertyKeyObjectVersion
- case v2acl.FilterObjectID:
- return nativeschema.PropertyKeyObjectID
- case v2acl.FilterObjectContainerID:
- return nativeschema.PropertyKeyObjectContainerID
- case v2acl.FilterObjectOwnerID:
- return nativeschema.PropertyKeyObjectOwnerID
- case v2acl.FilterObjectCreationEpoch:
- return nativeschema.PropertyKeyObjectCreationEpoch
- case v2acl.FilterObjectPayloadLength:
- return nativeschema.PropertyKeyObjectPayloadLength
- case v2acl.FilterObjectPayloadHash:
- return nativeschema.PropertyKeyObjectPayloadHash
- case v2acl.FilterObjectType:
- return nativeschema.PropertyKeyObjectType
- case v2acl.FilterObjectHomomorphicHash:
- return nativeschema.PropertyKeyObjectHomomorphicHash
- }
-}
-
-func getResource(eaclTable *eacl.Table) apechain.Resources {
- cnrID, isSet := eaclTable.CID()
- if isSet {
- return apechain.Resources{
- Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
- }
- }
- return apechain.Resources{
- Names: []string{nativeschema.ResourceFormatRootObjects},
- }
-}
-
-func actionToStatus(a eacl.Action) (apechain.Status, error) {
- switch a {
- case eacl.ActionAllow:
- return apechain.Allow, nil
- case eacl.ActionDeny:
- return apechain.AccessDenied, nil
- default:
- return apechain.NoRuleFound, &ConvertEACLError{nested: fmt.Errorf("unknown action: %d", a)}
- }
-}
-
-var eaclOperationToEngineAction = map[eacl.Operation]apechain.Actions{
- eacl.OperationGet: {Names: []string{nativeschema.MethodGetObject}},
- eacl.OperationHead: {Names: []string{nativeschema.MethodHeadObject}},
- eacl.OperationPut: {Names: []string{nativeschema.MethodPutObject}},
- eacl.OperationDelete: {Names: []string{nativeschema.MethodDeleteObject}},
- eacl.OperationSearch: {Names: []string{nativeschema.MethodSearchObject}},
- eacl.OperationRange: {Names: []string{nativeschema.MethodRangeObject}},
- eacl.OperationRangeHash: {Names: []string{nativeschema.MethodHashObject}},
-}
-
-func operationToAction(op eacl.Operation) (apechain.Actions, error) {
- if v, ok := eaclOperationToEngineAction[op]; ok {
- return v, nil
- }
- return apechain.Actions{}, &ConvertEACLError{nested: fmt.Errorf("unknown operation: %d", op)}
-}
diff --git a/pkg/util/ape/converter_test.go b/pkg/util/ape/converter_test.go
deleted file mode 100644
index 28125606c..000000000
--- a/pkg/util/ape/converter_test.go
+++ /dev/null
@@ -1,471 +0,0 @@
-package ape
-
-import (
- "encoding/hex"
- "fmt"
- "testing"
-
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "github.com/stretchr/testify/require"
-)
-
-func TestEACLTableWithoutRecords(t *testing.T) {
- t.Parallel()
-
- tb := eacl.NewTable()
- ch, err := ConvertEACLToAPE(tb)
- require.NoError(t, err)
-
- vu := &eacl.ValidationUnit{}
- vu.WithEACLTable(tb)
- req := &testRequest{
- res: &testResource{name: nativeschema.ResourceFormatRootObjects},
- }
-
- compare(t, vu, ch, req)
-
- cnrID := cidtest.ID()
- tb.SetCID(cnrID)
- vu.WithContainerID(&cnrID)
- req.res.name = fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())
-
- ch, err = ConvertEACLToAPE(tb)
- require.NoError(t, err)
-
- compare(t, vu, ch, req)
-}
-
-func TestNoTargets(t *testing.T) {
- t.Parallel()
- for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
- cnrID := cidtest.ID()
- tb := eacl.NewTable()
- tb.SetCID(cnrID)
-
- vu := &eacl.ValidationUnit{}
- vu.WithEACLTable(tb)
- vu.WithContainerID(&cnrID)
- vu.WithRole(eacl.RoleOthers)
-
- // deny delete without role or key specified
- record := eacl.NewRecord()
- record.SetAction(act)
- record.SetOperation(eacl.OperationDelete)
- record.AddObjectContainerIDFilter(eacl.MatchStringEqual, cnrID)
-
- tb.AddRecord(record)
-
- ch, err := ConvertEACLToAPE(tb)
- require.NoError(t, err)
-
- req := &testRequest{
- props: map[string]string{
- nativeschema.PropertyKeyActorRole: eacl.RoleOthers.String(),
- },
- res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
- }
- compare(t, vu, ch, req)
- }
-}
-
-func TestNoFilters(t *testing.T) {
- t.Parallel()
-
- t.Run("target match by role only", func(t *testing.T) {
- t.Parallel()
-
- for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
- cnrID := cidtest.ID()
- tb := eacl.NewTable()
- tb.SetCID(cnrID)
-
- vu := &eacl.ValidationUnit{}
- vu.WithEACLTable(tb)
- vu.WithContainerID(&cnrID)
- vu.WithRole(eacl.RoleOthers)
-
- // allow/deny for OTHERS
- record := eacl.NewRecord()
- record.SetAction(act)
- record.SetOperation(eacl.OperationDelete)
-
- target := eacl.NewTarget()
- target.SetRole(eacl.RoleOthers)
- record.SetTargets(*target)
-
- tb.AddRecord(record)
-
- ch, err := ConvertEACLToAPE(tb)
- require.NoError(t, err)
-
- req := &testRequest{
- props: map[string]string{
- nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers,
- },
- res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
- }
- compare(t, vu, ch, req)
- }
- })
-
- t.Run("target match by role and public key", func(t *testing.T) {
- t.Parallel()
-
- for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
- cnrID := cidtest.ID()
- tb := eacl.NewTable()
- tb.SetCID(cnrID)
-
- vu := &eacl.ValidationUnit{}
- vu.WithEACLTable(tb)
- vu.WithContainerID(&cnrID)
- vu.WithRole(eacl.RoleOthers)
-
- // allow/deny for OTHERS
- record := eacl.NewRecord()
- record.SetAction(act)
- record.SetOperation(eacl.OperationDelete)
-
- p1, err := keys.NewPrivateKey()
- require.NoError(t, err)
- p2, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- vu.WithSenderKey(p2.PublicKey().Bytes())
-
- target := eacl.NewTarget()
- target.SetRole(eacl.RoleOthers)
- target.SetBinaryKeys([][]byte{p1.PublicKey().Bytes(), p2.PublicKey().Bytes()})
- record.SetTargets(*target)
-
- tb.AddRecord(record)
-
- ch, err := ConvertEACLToAPE(tb)
- require.NoError(t, err)
-
- req := &testRequest{
- props: map[string]string{
- nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers,
- nativeschema.PropertyKeyActorPublicKey: string(p2.PublicKey().Bytes()),
- },
- res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
- }
- compare(t, vu, ch, req)
- }
- })
-
- t.Run("target match by public key only", func(t *testing.T) {
- t.Parallel()
-
- for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
- cnrID := cidtest.ID()
- tb := eacl.NewTable()
- tb.SetCID(cnrID)
-
- vu := &eacl.ValidationUnit{}
- vu.WithEACLTable(tb)
- vu.WithContainerID(&cnrID)
-
- // allow/deny for OTHERS
- record := eacl.NewRecord()
- record.SetAction(act)
- record.SetOperation(eacl.OperationDelete)
-
- p1, err := keys.NewPrivateKey()
- require.NoError(t, err)
- p2, err := keys.NewPrivateKey()
- require.NoError(t, err)
-
- vu.WithSenderKey(p2.PublicKey().Bytes())
-
- target := eacl.NewTarget()
- target.SetRole(eacl.RoleOthers)
- target.SetBinaryKeys([][]byte{p1.PublicKey().Bytes(), p2.PublicKey().Bytes()})
- record.SetTargets(*target)
-
- tb.AddRecord(record)
-
- ch, err := ConvertEACLToAPE(tb)
- require.NoError(t, err)
-
- req := &testRequest{
- props: map[string]string{
- nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(p2.PublicKey().Bytes()),
- },
- res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
- }
- compare(t, vu, ch, req)
- }
- })
-
- t.Run("target doesn't match", func(t *testing.T) {
- t.Parallel()
-
- for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
- cnrID := cidtest.ID()
- tb := eacl.NewTable()
- tb.SetCID(cnrID)
-
- vu := &eacl.ValidationUnit{}
- vu.WithEACLTable(tb)
- vu.WithContainerID(&cnrID)
- vu.WithRole(eacl.RoleSystem)
-
- // allow/deny for OTHERS
- record := eacl.NewRecord()
- record.SetAction(act)
- record.SetOperation(eacl.OperationDelete)
-
- target := eacl.NewTarget()
- target.SetRole(eacl.RoleOthers)
- record.SetTargets(*target)
-
- tb.AddRecord(record)
-
- ch, err := ConvertEACLToAPE(tb)
- require.NoError(t, err)
-
- req := &testRequest{
- props: map[string]string{
- nativeschema.PropertyKeyActorRole: eacl.RoleSystem.String(),
- },
- res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
- }
- compare(t, vu, ch, req)
- }
- })
-}
-
-func TestWithFilters(t *testing.T) {
- t.Parallel()
-
- t.Run("object attributes", func(t *testing.T) {
- t.Parallel()
-
- const attrKey = "attribute_1"
- const attrValue = "attribute_1_value"
-
- for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
- cnrID := cidtest.ID()
- tb := eacl.NewTable()
- tb.SetCID(cnrID)
-
- vu := &eacl.ValidationUnit{}
- vu.WithEACLTable(tb)
- vu.WithContainerID(&cnrID)
- vu.WithRole(eacl.RoleOthers)
- vu.WithHeaderSource(&testHeaderSource{
- headers: map[eacl.FilterHeaderType][]eacl.Header{
- eacl.HeaderFromObject: {&testHeader{key: attrKey, value: attrValue}},
- },
- })
-
- // allow/deny for OTHERS
- record := eacl.NewRecord()
- record.SetAction(act)
- record.SetOperation(eacl.OperationDelete)
-
- target := eacl.NewTarget()
- target.SetRole(eacl.RoleOthers)
- record.SetTargets(*target)
-
- record.AddObjectAttributeFilter(eacl.MatchStringEqual, attrKey, attrValue)
-
- tb.AddRecord(record)
-
- ch, err := ConvertEACLToAPE(tb)
- require.NoError(t, err)
-
- req := &testRequest{
- props: map[string]string{
- nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers,
- },
- res: &testResource{
- name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()),
- props: map[string]string{
- attrKey: attrValue,
- },
- },
- }
- compare(t, vu, ch, req)
- }
- })
-
- t.Run("request attributes", func(t *testing.T) {
- t.Parallel()
-
- const attrKey = "attribute_1"
- const attrValue = "attribute_1_value"
-
- for _, act := range []eacl.Action{eacl.ActionAllow, eacl.ActionDeny} {
- cnrID := cidtest.ID()
- tb := eacl.NewTable()
- tb.SetCID(cnrID)
-
- vu := &eacl.ValidationUnit{}
- vu.WithEACLTable(tb)
- vu.WithContainerID(&cnrID)
- vu.WithRole(eacl.RoleOthers)
- vu.WithHeaderSource(&testHeaderSource{
- headers: map[eacl.FilterHeaderType][]eacl.Header{
- eacl.HeaderFromRequest: {&testHeader{key: attrKey, value: attrValue}},
- },
- })
-
- // allow/deny for OTHERS
- record := eacl.NewRecord()
- record.SetAction(act)
- record.SetOperation(eacl.OperationDelete)
-
- target := eacl.NewTarget()
- target.SetRole(eacl.RoleOthers)
- record.SetTargets(*target)
-
- record.AddFilter(eacl.HeaderFromRequest, eacl.MatchStringEqual, attrKey, attrValue)
-
- tb.AddRecord(record)
-
- ch, err := ConvertEACLToAPE(tb)
- require.NoError(t, err)
-
- req := &testRequest{
- props: map[string]string{
- nativeschema.PropertyKeyActorRole: nativeschema.PropertyValueContainerRoleOthers,
- attrKey: attrValue,
- },
- res: &testResource{
- name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()),
- },
- }
- compare(t, vu, ch, req)
- }
- })
-}
-
-func TestNoHeader(t *testing.T) {
- t.Skip("Should pass after https://git.frostfs.info/TrueCloudLab/policy-engine/issues/8#issuecomment-26126")
-
- t.Parallel()
-
- const attrKey = "attribute_1"
- cnrID := cidtest.ID()
- tb := eacl.NewTable()
- tb.SetCID(cnrID)
-
- vu := &eacl.ValidationUnit{}
- vu.WithEACLTable(tb)
- vu.WithContainerID(&cnrID)
- vu.WithRole(eacl.RoleOthers)
- vu.WithHeaderSource(&testHeaderSource{
- headers: map[eacl.FilterHeaderType][]eacl.Header{
- eacl.HeaderFromRequest: {},
- },
- })
-
- // allow/deny for OTHERS
- record := eacl.NewRecord()
- record.SetAction(eacl.ActionDeny)
- record.SetOperation(eacl.OperationDelete)
-
- target := eacl.NewTarget()
- target.SetRole(eacl.RoleOthers)
- record.SetTargets(*target)
-
- record.AddFilter(eacl.HeaderFromRequest, eacl.MatchStringEqual, attrKey, "")
-
- tb.AddRecord(record)
-
- ch, err := ConvertEACLToAPE(tb)
- require.NoError(t, err)
-
- req := &testRequest{
- props: map[string]string{
- nativeschema.PropertyKeyActorRole: eacl.RoleOthers.String(),
- },
- res: &testResource{
- name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()),
- },
- }
- compare(t, vu, ch, req)
-}
-
-func compare(t *testing.T, vu *eacl.ValidationUnit, ch *apechain.Chain, req *testRequest) {
- validator := eacl.NewValidator()
- for eaclOp, apeOp := range eaclOperationToEngineAction {
- vu.WithOperation(eaclOp)
- req.op = apeOp.Names[0]
-
- eaclAct, recordFound := validator.CalculateAction(vu)
- apeSt, ruleFound := ch.Match(req)
-
- require.Equal(t, recordFound, ruleFound)
- require.NotEqual(t, eacl.ActionUnknown, eaclAct)
- if eaclAct == eacl.ActionAllow {
- if recordFound {
- require.Equal(t, apechain.Allow, apeSt)
- } else {
- require.Equal(t, apechain.NoRuleFound, apeSt)
- }
- } else {
- require.Equal(t, apechain.AccessDenied, apeSt)
- }
- }
-}
-
-type testRequest struct {
- op string
- props map[string]string
- res *testResource
-}
-
-func (r *testRequest) Operation() string {
- return r.op
-}
-
-func (r *testRequest) Property(key string) string {
- if v, ok := r.props[key]; ok {
- return v
- }
- return ""
-}
-
-func (r *testRequest) Resource() resource.Resource {
- return r.res
-}
-
-type testResource struct {
- name string
- props map[string]string
-}
-
-func (r *testResource) Name() string {
- return r.name
-}
-
-func (r *testResource) Property(key string) string {
- if v, ok := r.props[key]; ok {
- return v
- }
- return ""
-}
-
-type testHeaderSource struct {
- headers map[eacl.FilterHeaderType][]eacl.Header
-}
-
-func (s *testHeaderSource) HeadersOfType(t eacl.FilterHeaderType) ([]eacl.Header, bool) {
- v, ok := s.headers[t]
- return v, ok
-}
-
-type testHeader struct {
- key, value string
-}
-
-func (h *testHeader) Key() string { return h.key }
-func (h *testHeader) Value() string { return h.value }
diff --git a/pkg/util/ape/parser.go b/pkg/util/ape/parser.go
deleted file mode 100644
index 6f114d45b..000000000
--- a/pkg/util/ape/parser.go
+++ /dev/null
@@ -1,321 +0,0 @@
-package ape
-
-import (
- "errors"
- "fmt"
- "os"
- "strings"
-
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/flynn-archive/go-shlex"
-)
-
-var (
- errInvalidStatementFormat = errors.New("invalid statement format")
- errInvalidConditionFormat = errors.New("invalid condition format")
- errUnknownStatus = errors.New("status is not recognized")
- errUnknownStatusDetail = errors.New("status detail is not recognized")
- errUnknownAction = errors.New("action is not recognized")
- errUnknownBinaryOperator = errors.New("binary operator is not recognized")
- errUnknownCondObjectType = errors.New("condition object type is not recognized")
- errMixedTypesInRule = errors.New("found mixed type of actions in rule")
- errNoActionsInRule = errors.New("there are no actions in rule")
- errUnsupportedResourceFormat = errors.New("unsupported resource format")
- errFailedToParseAllAny = errors.New("any/all is not parsed")
-)
-
-func ParseAPEChainBinaryOrJSON(chain *apechain.Chain, path string) error {
- data, err := os.ReadFile(path)
- if err != nil {
- return fmt.Errorf("read file <%s>: %w", path, err)
- }
-
- err = chain.UnmarshalBinary(data)
- if err != nil {
- err = chain.UnmarshalJSON(data)
- if err != nil {
- return fmt.Errorf("invalid format: %w", err)
- }
- }
-
- return nil
-}
-
-// ParseAPEChain parses APE chain rules.
-func ParseAPEChain(chain *apechain.Chain, rules []string) error {
- if len(rules) == 0 {
- return errors.New("no APE rules provided")
- }
-
- for _, rule := range rules {
- r := new(apechain.Rule)
- if err := ParseAPERule(r, rule); err != nil {
- return err
- }
- chain.Rules = append(chain.Rules, *r)
- }
-
- return nil
-}
-
-// ParseAPERule parses access-policy-engine statement from the following form:
-// [:status_detail] ... [...] ...
-//
-// Examples:
-// deny Object.Put *
-// deny:QuotaLimitReached Object.Put *
-// allow Object.Put *
-// allow Object.Get ResourceCondition:Department=HR RequestCondition:Actor=ownerA *
-// allow Object.Get any ResourceCondition:Department=HR RequestCondition:Actor=ownerA *
-// allow Object.Get all ResourceCondition:Department=HR RequestCondition:Actor=ownerA *
-// allow Object.* *
-// allow Container.* *
-//
-//nolint:godot
-func ParseAPERule(r *apechain.Rule, rule string) error {
- lexemes, err := shlex.Split(rule)
- if err != nil {
- return fmt.Errorf("can't parse rule '%s': %v", rule, err)
- }
- return parseRuleLexemes(r, lexemes)
-}
-
-func unique(inputSlice []string) []string {
- uniqueSlice := make([]string, 0, len(inputSlice))
- seen := make(map[string]bool, len(inputSlice))
- for _, element := range inputSlice {
- if !seen[element] {
- uniqueSlice = append(uniqueSlice, element)
- seen[element] = true
- }
- }
- return uniqueSlice
-}
-
-func parseRuleLexemes(r *apechain.Rule, lexemes []string) error {
- if len(lexemes) < 2 {
- return errInvalidStatementFormat
- }
-
- var err error
- r.Status, err = parseStatus(lexemes[0])
- if err != nil {
- return err
- }
-
- var objectTargeted bool
- var containerTargeted bool
-
- for i, lexeme := range lexemes[1:] {
- anyExpr, anyErr := parseAnyAll(lexeme)
- if anyErr == nil {
- r.Any = anyExpr
- continue
- }
-
- var names []string
- var actionType bool
- names, actionType, err = parseAction(lexeme)
- if err != nil {
- condition, errCond := parseCondition(lexeme)
- if errCond != nil {
- err = fmt.Errorf("%w:%w", err, errCond)
- lexemes = lexemes[i+1:]
- break
- }
- r.Condition = append(r.Condition, *condition)
- } else {
- if actionType {
- objectTargeted = true
- } else {
- containerTargeted = true
- }
- if objectTargeted && containerTargeted {
- // Actually, APE chain allows to define rules for several resources, for example, if
- // chain target is namespace, but the parser primitevly compiles verbs,
- // conditions and resources in one rule. So, for the parser, one rule relates only to
- // one resource type - object or container.
- return errMixedTypesInRule
- }
-
- r.Actions.Names = append(r.Actions.Names, names...)
- }
- }
- r.Actions.Names = unique(r.Actions.Names)
- if len(r.Actions.Names) == 0 {
- return fmt.Errorf("%w:%w", err, errNoActionsInRule)
- }
- for _, lexeme := range lexemes {
- resource, errRes := parseResource(lexeme, objectTargeted)
- if errRes != nil {
- return fmt.Errorf("%w:%w", err, errRes)
- }
- r.Resources.Names = append(r.Resources.Names, resource)
- }
-
- return nil
-}
-
-func parseAnyAll(lexeme string) (bool, error) {
- switch strings.ToLower(lexeme) {
- case "any":
- return true, nil
- case "all":
- return false, nil
- default:
- return false, errFailedToParseAllAny
- }
-}
-
-func parseStatus(lexeme string) (apechain.Status, error) {
- action, expression, found := strings.Cut(lexeme, ":")
- switch strings.ToLower(action) {
- case "deny":
- if !found {
- return apechain.AccessDenied, nil
- }
- if strings.EqualFold(expression, "QuotaLimitReached") {
- return apechain.QuotaLimitReached, nil
- }
- return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression)
- case "allow":
- if found {
- return 0, errUnknownStatusDetail
- }
- return apechain.Allow, nil
- default:
- return 0, errUnknownStatus
- }
-}
-
-func parseAction(lexeme string) ([]string, bool, error) {
- switch strings.ToLower(lexeme) {
- case "object.put":
- return []string{nativeschema.MethodPutObject}, true, nil
- case "object.get":
- return []string{nativeschema.MethodGetObject}, true, nil
- case "object.head":
- return []string{nativeschema.MethodHeadObject}, true, nil
- case "object.delete":
- return []string{nativeschema.MethodDeleteObject}, true, nil
- case "object.search":
- return []string{nativeschema.MethodSearchObject}, true, nil
- case "object.range":
- return []string{nativeschema.MethodRangeObject}, true, nil
- case "object.hash":
- return []string{nativeschema.MethodHashObject}, true, nil
- case "object.patch":
- return []string{nativeschema.MethodPatchObject}, true, nil
- case "object.*":
- return []string{
- nativeschema.MethodPutObject,
- nativeschema.MethodGetObject,
- nativeschema.MethodHeadObject,
- nativeschema.MethodDeleteObject,
- nativeschema.MethodSearchObject,
- nativeschema.MethodRangeObject,
- nativeschema.MethodHashObject,
- nativeschema.MethodPatchObject,
- }, true, nil
- case "container.put":
- return []string{nativeschema.MethodPutContainer}, false, nil
- case "container.delete":
- return []string{nativeschema.MethodDeleteContainer}, false, nil
- case "container.get":
- return []string{nativeschema.MethodGetContainer}, false, nil
- case "container.list":
- return []string{nativeschema.MethodListContainers}, false, nil
- case "container.*":
- return []string{
- nativeschema.MethodPutContainer,
- nativeschema.MethodDeleteContainer,
- nativeschema.MethodGetContainer,
- nativeschema.MethodListContainers,
- }, false, nil
- default:
- }
- return nil, false, fmt.Errorf("%w: %s", errUnknownAction, lexeme)
-}
-
-func parseResource(lexeme string, isObj bool) (string, error) {
- if len(lexeme) > 0 && !strings.HasSuffix(lexeme, "/") {
- if isObj {
- if lexeme == "*" {
- return nativeschema.ResourceFormatAllObjects, nil
- } else if lexeme == "/*" || lexeme == "root/*" {
- return nativeschema.ResourceFormatRootObjects, nil
- } else if strings.HasPrefix(lexeme, "/") {
- lexeme = lexeme[1:]
- delimCount := strings.Count(lexeme, "/")
- if delimCount == 1 && len(lexeme) >= 3 { // container/object
- return nativeschema.ObjectPrefix + "//" + lexeme, nil
- }
- } else {
- delimCount := strings.Count(lexeme, "/")
- if delimCount == 1 && len(lexeme) >= 3 ||
- delimCount == 2 && len(lexeme) >= 5 { // namespace/container/object
- return nativeschema.ObjectPrefix + "/" + lexeme, nil
- }
- }
- } else {
- if lexeme == "*" {
- return nativeschema.ResourceFormatAllContainers, nil
- } else if lexeme == "/*" || lexeme == "root/*" {
- return nativeschema.ResourceFormatRootContainers, nil
- } else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 {
- lexeme = lexeme[1:]
- delimCount := strings.Count(lexeme, "/")
- if delimCount == 0 {
- return nativeschema.ContainerPrefix + "//" + lexeme, nil
- }
- } else {
- delimCount := strings.Count(lexeme, "/")
- if delimCount == 1 && len(lexeme) > 3 { // namespace/container
- return nativeschema.ContainerPrefix + "/" + lexeme, nil
- }
- }
- }
- }
- return "", errUnsupportedResourceFormat
-}
-
-const (
- ResourceCondition = "resourcecondition"
- RequestCondition = "requestcondition"
-)
-
-var typeToCondKindType = map[string]apechain.ConditionKindType{
- ResourceCondition: apechain.KindResource,
- RequestCondition: apechain.KindRequest,
-}
-
-func parseCondition(lexeme string) (*apechain.Condition, error) {
- typ, expression, found := strings.Cut(lexeme, ":")
- typ = strings.ToLower(typ)
-
- condKindType, ok := typeToCondKindType[typ]
- if ok {
- if !found {
- return nil, fmt.Errorf("%w: %s", errInvalidConditionFormat, lexeme)
- }
-
- var cond apechain.Condition
- cond.Kind = condKindType
-
- lhs, rhs, binExpFound := strings.Cut(expression, "!=")
- if !binExpFound {
- lhs, rhs, binExpFound = strings.Cut(expression, "=")
- if !binExpFound {
- return nil, fmt.Errorf("%w: %s", errUnknownBinaryOperator, expression)
- }
- cond.Op = apechain.CondStringEquals
- } else {
- cond.Op = apechain.CondStringNotEquals
- }
-
- cond.Key, cond.Value = lhs, rhs
- return &cond, nil
- }
- return nil, fmt.Errorf("%w: %s", errUnknownCondObjectType, typ)
-}
diff --git a/pkg/util/ape/parser_test.go b/pkg/util/ape/parser_test.go
deleted file mode 100644
index c236c4603..000000000
--- a/pkg/util/ape/parser_test.go
+++ /dev/null
@@ -1,328 +0,0 @@
-package ape
-
-import (
- "fmt"
- "testing"
-
- policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
- "github.com/stretchr/testify/require"
-)
-
-func TestParseAPERule(t *testing.T) {
- tests := [...]struct {
- name string
- rule string
- expectErr error
- expectRule policyengine.Rule
- }{
- {
- name: "Valid allow rule for all objects",
- rule: "allow Object.Put *",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}},
- },
- },
- {
- name: "Valid rule for all objects in implicit root namespace",
- rule: "allow Object.Put /*",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
- },
- },
- {
- name: "Valid rule for all objects in explicit root namespace",
- rule: "allow Object.Put root/*",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
- },
- },
- {
- name: "Valid rule for all containers in explicit root namespace",
- rule: "allow Container.Put root/*",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}},
- Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}},
- },
- },
- {
- name: "Valid rule for all objects in root namespace and container",
- rule: "allow Object.Put /cid/*",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: policyengine.Resources{Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, "cid"),
- }},
- },
- },
- {
- name: "Valid rule for object in root namespace and container",
- rule: "allow Object.Put /cid/oid",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: policyengine.Resources{Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainerObject, "cid", "oid"),
- }},
- },
- },
- {
- name: "Valid rule for all objects in namespace",
- rule: "allow Object.Put ns/*",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: policyengine.Resources{Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatNamespaceObjects, "ns"),
- }},
- },
- },
- {
- name: "Valid rule for all objects in namespace and container",
- rule: "allow Object.Put ns/cid/*",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: policyengine.Resources{Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, "ns", "cid"),
- }},
- },
- },
- {
- name: "Valid rule for object in namespace and container",
- rule: "allow Object.Put ns/cid/oid",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: policyengine.Resources{Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObject, "ns", "cid", "oid"),
- }},
- },
- },
- {
- name: "Valid deny rule",
- rule: "deny Object.Put *",
- expectRule: policyengine.Rule{
- Status: policyengine.AccessDenied,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}},
- },
- },
- {
- name: "Valid deny rule with action detail",
- rule: "deny:QuotaLimitReached Object.Put *",
- expectRule: policyengine.Rule{
- Status: policyengine.QuotaLimitReached,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
- Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}},
- },
- },
- {
- name: "Valid allow rule with conditions",
- rule: "allow Object.Get ResourceCondition:Department=HR RequestCondition:Actor!=ownerA *",
- expectRule: policyengine.Rule{
- Status: policyengine.Allow,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodGetObject}},
- Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}},
- Condition: []policyengine.Condition{
- {
- Op: policyengine.CondStringEquals,
- Kind: policyengine.KindResource,
- Key: "Department",
- Value: "HR",
- },
- {
- Op: policyengine.CondStringNotEquals,
- Kind: policyengine.KindRequest,
- Key: "Actor",
- Value: "ownerA",
- },
- },
- },
- },
- {
- name: "Valid rule for object with conditions with action detail",
- rule: "deny:QuotaLimitReached Object.Get ResourceCondition:Department=HR RequestCondition:Actor!=ownerA *",
- expectRule: policyengine.Rule{
- Status: policyengine.QuotaLimitReached,
- Actions: policyengine.Actions{Names: []string{nativeschema.MethodGetObject}},
- Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatAllObjects}},
- Condition: []policyengine.Condition{
- {
- Op: policyengine.CondStringEquals,
- Kind: policyengine.KindResource,
- Key: "Department",
- Value: "HR",
- },
- {
- Op: policyengine.CondStringNotEquals,
- Kind: policyengine.KindRequest,
- Key: "Actor",
- Value: "ownerA",
- },
- },
- },
- },
- {
- name: "Invalid rule with unknown status",
- rule: "permit Object.Put *",
- expectErr: errUnknownStatus,
- },
- {
- name: "Invalid rule with unknown action",
- rule: "allow Object.PutOut *",
- expectErr: errUnknownAction,
- },
- {
- name: "Invalid rule with unknown status detail",
- rule: "deny:UnknownActionDetail Object.Put *",
- expectErr: errUnknownStatusDetail,
- },
- {
- name: "Invalid rule with unknown condition binary operator",
- rule: "deny Object.Put ResourceCondition:Department
= '0' && sym <= '9'
+}
+
+func isUpperAlpha(sym uint8) bool {
+ return sym >= 'A' && sym <= 'Z'
+}
diff --git a/pkg/util/locode/db/airports/calls.go b/pkg/util/locode/db/airports/calls.go
new file mode 100644
index 000000000..dac8cce8b
--- /dev/null
+++ b/pkg/util/locode/db/airports/calls.go
@@ -0,0 +1,194 @@
+package airportsdb
+
+import (
+ "encoding/csv"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
+)
+
+const (
+ _ = iota - 1
+
+ _ // Airport ID
+ _ // Name
+ airportCity
+ airportCountry
+ airportIATA
+ _ // ICAO
+ airportLatitude
+ airportLongitude
+ _ // Altitude
+ _ // Timezone
+ _ // DST
+ _ // Tz database time zone
+ _ // Type
+ _ // Source
+
+ airportFldNum
+)
+
+type record struct {
+ city,
+ country,
+ iata,
+ lat,
+ lng string
+}
+
+// Get scans the records of the OpenFlights Airport to an in-memory table (once),
+// and returns an entry that matches the passed UN/LOCODE record.
+//
+// Records are matched if they have the same country code and either
+// same IATA code or same city name (location name in UN/LOCODE).
+//
+// Returns locodedb.ErrAirportNotFound if no entry matches.
+func (db *DB) Get(locodeRecord locode.Record) (*locodedb.AirportRecord, error) {
+ if err := db.initAirports(); err != nil {
+ return nil, err
+ }
+
+ records := db.mAirports[locodeRecord.LOCODE.CountryCode()]
+
+ for i := range records {
+ if locodeRecord.LOCODE.LocationCode() != records[i].iata &&
+ locodeRecord.NameWoDiacritics != records[i].city {
+ continue
+ }
+
+ lat, err := strconv.ParseFloat(records[i].lat, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ lng, err := strconv.ParseFloat(records[i].lng, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return &locodedb.AirportRecord{
+ CountryName: records[i].country,
+ Point: locodedb.NewPoint(lat, lng),
+ }, nil
+ }
+
+ return nil, locodedb.ErrAirportNotFound
+}
+
+const (
+ _ = iota - 1
+
+ countryName
+ countryISOCode
+ _ // dafif_code
+
+ countryFldNum
+)
+
+// CountryName scans the records of the OpenFlights Country table to an in-memory table (once),
+// and returns the name of the country by code.
+//
+// Returns locodedb.ErrCountryNotFound if no entry matches.
+func (db *DB) CountryName(code *locodedb.CountryCode) (name string, err error) {
+ if err = db.initCountries(); err != nil {
+ return
+ }
+
+ argCode := code.String()
+
+ for cName, cCode := range db.mCountries {
+ if cCode == argCode {
+ name = cName
+ break
+ }
+ }
+
+ if name == "" {
+ err = locodedb.ErrCountryNotFound
+ }
+
+ return
+}
+
+func (db *DB) initAirports() (err error) {
+ db.airportsOnce.Do(func() {
+ db.mAirports = make(map[string][]record)
+
+ if err = db.initCountries(); err != nil {
+ return
+ }
+
+ err = db.scanWords(db.airports, airportFldNum, func(words []string) error {
+ countryCode := db.mCountries[words[airportCountry]]
+ if countryCode != "" {
+ db.mAirports[countryCode] = append(db.mAirports[countryCode], record{
+ city: words[airportCity],
+ country: words[airportCountry],
+ iata: words[airportIATA],
+ lat: words[airportLatitude],
+ lng: words[airportLongitude],
+ })
+ }
+
+ return nil
+ })
+ })
+
+ return
+}
+
+func (db *DB) initCountries() (err error) {
+ db.countriesOnce.Do(func() {
+ db.mCountries = make(map[string]string)
+
+ err = db.scanWords(db.countries, countryFldNum, func(words []string) error {
+ db.mCountries[words[countryName]] = words[countryISOCode]
+
+ return nil
+ })
+ })
+
+ return
+}
+
+var errScanInt = errors.New("interrupt scan")
+
+func (db *DB) scanWords(pm pathMode, num int, wordsHandler func([]string) error) error {
+ tableFile, err := os.OpenFile(pm.path, os.O_RDONLY, pm.mode)
+ if err != nil {
+ return err
+ }
+
+ defer tableFile.Close()
+
+ r := csv.NewReader(tableFile)
+ r.ReuseRecord = true
+
+ for {
+ words, err := r.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ return err
+ } else if ln := len(words); ln != num {
+ return fmt.Errorf("unexpected number of words %d", ln)
+ }
+
+ if err := wordsHandler(words); err != nil {
+ if errors.Is(err, errScanInt) {
+ break
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/util/locode/db/airports/db.go b/pkg/util/locode/db/airports/db.go
new file mode 100644
index 000000000..acfa3fd60
--- /dev/null
+++ b/pkg/util/locode/db/airports/db.go
@@ -0,0 +1,83 @@
+package airportsdb
+
+import (
+ "fmt"
+ "io/fs"
+ "sync"
+)
+
+// Prm groups the required parameters of the DB's constructor.
+//
+// All values must comply with the requirements imposed on them.
+// Passing incorrect parameter values will result in constructor
+// failure (error or panic depending on the implementation).
+type Prm struct {
+ // Path to OpenFlights Airport csv table.
+ //
+ // Must not be empty.
+ AirportsPath string
+
+ // Path to OpenFlights Countries csv table.
+ //
+ // Must not be empty.
+ CountriesPath string
+}
+
+// DB is a descriptor of the OpenFlights database in csv format.
+//
+// For correct operation, DB must be created
+// using the constructor (New) based on the required parameters
+// and optional components. After successful creation,
+// The DB is immediately ready to work through API.
+type DB struct {
+ airports, countries pathMode
+
+ airportsOnce, countriesOnce sync.Once
+
+ mCountries map[string]string
+
+ mAirports map[string][]record
+}
+
+type pathMode struct {
+ path string
+ mode fs.FileMode
+}
+
+const invalidPrmValFmt = "invalid parameter %s (%T):%v"
+
+func panicOnPrmValue(n string, v any) {
+ panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
+}
+
+// New creates a new instance of the DB.
+//
+// Panics if at least one value of the parameters is invalid.
+//
+// The created DB does not require additional
+// initialization and is completely ready for work.
+func New(prm Prm, opts ...Option) *DB {
+ switch {
+ case prm.AirportsPath == "":
+ panicOnPrmValue("AirportsPath", prm.AirportsPath)
+ case prm.CountriesPath == "":
+ panicOnPrmValue("CountriesPath", prm.CountriesPath)
+ }
+
+ o := defaultOpts()
+
+ for i := range opts {
+ opts[i](o)
+ }
+
+ return &DB{
+ airports: pathMode{
+ path: prm.AirportsPath,
+ mode: o.airportMode,
+ },
+ countries: pathMode{
+ path: prm.CountriesPath,
+ mode: o.countryMode,
+ },
+ }
+}
diff --git a/pkg/util/locode/db/airports/opts.go b/pkg/util/locode/db/airports/opts.go
new file mode 100644
index 000000000..3799d9e27
--- /dev/null
+++ b/pkg/util/locode/db/airports/opts.go
@@ -0,0 +1,19 @@
+package airportsdb
+
+import (
+ "io/fs"
+)
+
+// Option sets an optional parameter of DB.
+type Option func(*options)
+
+type options struct {
+ airportMode, countryMode fs.FileMode
+}
+
+func defaultOpts() *options {
+ return &options{
+ airportMode: fs.ModePerm, // 0777
+ countryMode: fs.ModePerm, // 0777
+ }
+}
diff --git a/pkg/util/locode/db/boltdb/calls.go b/pkg/util/locode/db/boltdb/calls.go
new file mode 100644
index 000000000..6a80def3a
--- /dev/null
+++ b/pkg/util/locode/db/boltdb/calls.go
@@ -0,0 +1,166 @@
+package locodebolt
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "path/filepath"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
+ "go.etcd.io/bbolt"
+)
+
+// Open opens an underlying BoltDB instance.
+//
+// Timeout of BoltDB opening is 3s (only for Linux or Darwin).
+//
+// Opens BoltDB in read-only mode if DB is read-only.
+func (db *DB) Open() error {
+ // copy-paste from metabase:
+ // consider universal Open/Close for BoltDB wrappers
+
+ err := util.MkdirAllX(filepath.Dir(db.path), db.mode)
+ if err != nil {
+ return fmt.Errorf("could not create dir for BoltDB: %w", err)
+ }
+
+ db.bolt, err = bbolt.Open(db.path, db.mode, db.boltOpts)
+ if err != nil {
+ return fmt.Errorf("could not open BoltDB: %w", err)
+ }
+
+ return nil
+}
+
+// Close closes an underlying BoltDB instance.
+//
+// Must not be called before successful Open call.
+func (db *DB) Close() error {
+ return db.bolt.Close()
+}
+
+func countryBucketKey(cc *locodedb.CountryCode) ([]byte, error) {
+ return []byte(cc.String()), nil
+}
+
+func locationBucketKey(lc *locodedb.LocationCode) ([]byte, error) {
+ return []byte(lc.String()), nil
+}
+
+type recordJSON struct {
+ CountryName string
+ LocationName string
+ SubDivName string
+ SubDivCode string
+ Latitude float64
+ Longitude float64
+ Continent string
+}
+
+func recordValue(r locodedb.Record) ([]byte, error) {
+ p := r.GeoPoint()
+
+ rj := &recordJSON{
+ CountryName: r.CountryName(),
+ LocationName: r.LocationName(),
+ SubDivName: r.SubDivName(),
+ SubDivCode: r.SubDivCode(),
+ Latitude: p.Latitude(),
+ Longitude: p.Longitude(),
+ Continent: r.Continent().String(),
+ }
+
+ return json.Marshal(rj)
+}
+
+func recordFromValue(data []byte) (*locodedb.Record, error) {
+ rj := new(recordJSON)
+
+ if err := json.Unmarshal(data, rj); err != nil {
+ return nil, err
+ }
+
+ r := new(locodedb.Record)
+ r.SetCountryName(rj.CountryName)
+ r.SetLocationName(rj.LocationName)
+ r.SetSubDivName(rj.SubDivName)
+ r.SetSubDivCode(rj.SubDivCode)
+ r.SetGeoPoint(locodedb.NewPoint(rj.Latitude, rj.Longitude))
+
+ cont := locodedb.ContinentFromString(rj.Continent)
+ r.SetContinent(&cont)
+
+ return r, nil
+}
+
+// Put saves the record by key in an underlying BoltDB instance.
+//
+// Country code from the key is used for allocating the 1st level buckets.
+// Records are stored in country buckets by the location code from the key.
+// The records are stored in internal binary JSON format.
+//
+// Must not be called before successful Open call.
+// Must not be called in read-only mode: behavior is undefined.
+func (db *DB) Put(key locodedb.Key, rec locodedb.Record) error {
+ return db.bolt.Batch(func(tx *bbolt.Tx) error {
+ countryKey, err := countryBucketKey(key.CountryCode())
+ if err != nil {
+ return err
+ }
+
+ bktCountry, err := tx.CreateBucketIfNotExists(countryKey)
+ if err != nil {
+ return fmt.Errorf("could not create country bucket: %w", err)
+ }
+
+ locationKey, err := locationBucketKey(key.LocationCode())
+ if err != nil {
+ return err
+ }
+
+ cont, err := recordValue(rec)
+ if err != nil {
+ return err
+ }
+
+ return bktCountry.Put(locationKey, cont)
+ })
+}
+
+var errRecordNotFound = errors.New("record not found")
+
+// Get reads the record by key from underlying BoltDB instance.
+//
+// Returns an error if no record is presented by key in DB.
+//
+// Must not be called before successful Open call.
+func (db *DB) Get(key locodedb.Key) (rec *locodedb.Record, err error) {
+ err = db.bolt.View(func(tx *bbolt.Tx) error {
+ countryKey, err := countryBucketKey(key.CountryCode())
+ if err != nil {
+ return err
+ }
+
+ bktCountry := tx.Bucket(countryKey)
+ if bktCountry == nil {
+ return errRecordNotFound
+ }
+
+ locationKey, err := locationBucketKey(key.LocationCode())
+ if err != nil {
+ return err
+ }
+
+ data := bktCountry.Get(locationKey)
+ if data == nil {
+ return errRecordNotFound
+ }
+
+ rec, err = recordFromValue(data)
+
+ return err
+ })
+
+ return
+}
diff --git a/pkg/util/locode/db/boltdb/db.go b/pkg/util/locode/db/boltdb/db.go
new file mode 100644
index 000000000..3d09a797d
--- /dev/null
+++ b/pkg/util/locode/db/boltdb/db.go
@@ -0,0 +1,73 @@
+package locodebolt
+
+import (
+ "fmt"
+ "io/fs"
+
+ "go.etcd.io/bbolt"
+)
+
+// Prm groups the required parameters of the DB's constructor.
+//
+// All values must comply with the requirements imposed on them.
+// Passing incorrect parameter values will result in constructor
+// failure (error or panic depending on the implementation).
+type Prm struct {
+ // Path to BoltDB file with FrostFS location database.
+ //
+ // Must not be empty.
+ Path string
+}
+
+// DB is a descriptor of the FrostFS BoltDB location database.
+//
+// For correct operation, DB must be created
+// using the constructor (New) based on the required parameters
+// and optional components.
+//
+// After successful creation,
+// DB must be opened through Open call. After successful opening,
+// DB is ready to work through API (until Close call).
+//
+// Upon completion of work with the DB, it must be closed
+// by Close method.
+type DB struct {
+ path string
+
+ mode fs.FileMode
+
+ boltOpts *bbolt.Options
+
+ bolt *bbolt.DB
+}
+
+const invalidPrmValFmt = "invalid parameter %s (%T):%v"
+
+func panicOnPrmValue(n string, v any) {
+ panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
+}
+
+// New creates a new instance of the DB.
+//
+// Panics if at least one value of the parameters is invalid.
+//
+// The created DB requires calling the Open method in order
+// to initialize required resources.
+func New(prm Prm, opts ...Option) *DB {
+ switch {
+ case prm.Path == "":
+ panicOnPrmValue("Path", prm.Path)
+ }
+
+ o := defaultOpts()
+
+ for i := range opts {
+ opts[i](o)
+ }
+
+ return &DB{
+ path: prm.Path,
+ mode: o.mode,
+ boltOpts: o.boltOpts,
+ }
+}
diff --git a/pkg/util/locode/db/boltdb/opts.go b/pkg/util/locode/db/boltdb/opts.go
new file mode 100644
index 000000000..db0cccd3a
--- /dev/null
+++ b/pkg/util/locode/db/boltdb/opts.go
@@ -0,0 +1,37 @@
+package locodebolt
+
+import (
+ "io/fs"
+ "os"
+ "time"
+
+ "go.etcd.io/bbolt"
+)
+
+// Option sets an optional parameter of DB.
+type Option func(*options)
+
+type options struct {
+ mode fs.FileMode
+
+ boltOpts *bbolt.Options
+}
+
+func defaultOpts() *options {
+ return &options{
+ mode: os.ModePerm, // 0777
+ boltOpts: &bbolt.Options{
+ Timeout: 3 * time.Second,
+ },
+ }
+}
+
+// ReadOnly enables read-only mode of the DB.
+//
+// Do not call DB.Put method on instances with
+// this option: the behavior is undefined.
+func ReadOnly() Option {
+ return func(o *options) {
+ o.boltOpts.ReadOnly = true
+ }
+}
diff --git a/pkg/util/locode/db/continent.go b/pkg/util/locode/db/continent.go
new file mode 100644
index 000000000..863af7b57
--- /dev/null
+++ b/pkg/util/locode/db/continent.go
@@ -0,0 +1,81 @@
+package locodedb
+
+// Continent is an enumeration of Earth's continents.
+type Continent uint8
+
+const (
+ // ContinentUnknown is an undefined Continent value.
+ ContinentUnknown = iota
+
+ // ContinentEurope corresponds to Europe.
+ ContinentEurope
+
+ // ContinentAfrica corresponds to Africa.
+ ContinentAfrica
+
+ // ContinentNorthAmerica corresponds to North America.
+ ContinentNorthAmerica
+
+ // ContinentSouthAmerica corresponds to South America.
+ ContinentSouthAmerica
+
+ // ContinentAsia corresponds to Asia.
+ ContinentAsia
+
+ // ContinentAntarctica corresponds to Antarctica.
+ ContinentAntarctica
+
+ // ContinentOceania corresponds to Oceania.
+ ContinentOceania
+)
+
+// Is checks if c is the same continent as c2.
+func (c *Continent) Is(c2 Continent) bool {
+ return *c == c2
+}
+
+func (c Continent) String() string {
+ switch c {
+ case ContinentUnknown:
+ fallthrough
+ default:
+ return "Unknown"
+ case ContinentEurope:
+ return "Europe"
+ case ContinentAfrica:
+ return "Africa"
+ case ContinentNorthAmerica:
+ return "North America"
+ case ContinentSouthAmerica:
+ return "South America"
+ case ContinentAsia:
+ return "Asia"
+ case ContinentAntarctica:
+ return "Antarctica"
+ case ContinentOceania:
+ return "Oceania"
+ }
+}
+
+// ContinentFromString returns Continent value
+// corresponding to the passed string representation.
+func ContinentFromString(str string) Continent {
+ switch str {
+ default:
+ return ContinentUnknown
+ case "Europe":
+ return ContinentEurope
+ case "Africa":
+ return ContinentAfrica
+ case "North America":
+ return ContinentNorthAmerica
+ case "South America":
+ return ContinentSouthAmerica
+ case "Asia":
+ return ContinentAsia
+ case "Antarctica":
+ return ContinentAntarctica
+ case "Oceania":
+ return ContinentOceania
+ }
+}
diff --git a/pkg/util/locode/db/continents/geojson/calls.go b/pkg/util/locode/db/continents/geojson/calls.go
new file mode 100644
index 000000000..34467d5a2
--- /dev/null
+++ b/pkg/util/locode/db/continents/geojson/calls.go
@@ -0,0 +1,98 @@
+package continentsdb
+
+import (
+ "fmt"
+ "os"
+
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
+ "github.com/paulmach/orb"
+ "github.com/paulmach/orb/geojson"
+ "github.com/paulmach/orb/planar"
+)
+
+const continentProperty = "Continent"
+
+// PointContinent goes through all polygons and returns the continent
+// in which the point is located.
+//
+// If no polygon contains the point, the continent of the nearest feature is returned.
+//
+// All GeoJSON features are parsed from the file once and stored in memory.
+func (db *DB) PointContinent(point *locodedb.Point) (*locodedb.Continent, error) {
+ var err error
+
+ db.once.Do(func() {
+ err = db.init()
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ planarPoint := orb.Point{point.Longitude(), point.Latitude()}
+
+ var (
+ continent string
+ minDst float64
+ )
+
+ for _, feature := range db.features {
+ if multiPolygon, ok := feature.Geometry.(orb.MultiPolygon); ok {
+ if planar.MultiPolygonContains(multiPolygon, planarPoint) {
+ continent = feature.Properties.MustString(continentProperty)
+ break
+ }
+ } else if polygon, ok := feature.Geometry.(orb.Polygon); ok {
+ if planar.PolygonContains(polygon, planarPoint) {
+ continent = feature.Properties.MustString(continentProperty)
+ break
+ }
+ }
+ distance := planar.DistanceFrom(feature.Geometry, planarPoint)
+ if minDst == 0 || minDst > distance {
+ minDst = distance
+ continent = feature.Properties.MustString(continentProperty)
+ }
+ }
+
+ c := continentFromString(continent)
+
+ return &c, nil
+}
+
+func (db *DB) init() error {
+ data, err := os.ReadFile(db.path)
+ if err != nil {
+ return fmt.Errorf("could not read data file: %w", err)
+ }
+
+ features, err := geojson.UnmarshalFeatureCollection(data)
+ if err != nil {
+ return fmt.Errorf("could not unmarshal GeoJSON feature collection: %w", err)
+ }
+
+ db.features = features.Features
+
+ return nil
+}
+
+func continentFromString(c string) locodedb.Continent {
+ switch c {
+ default:
+ return locodedb.ContinentUnknown
+ case "Africa":
+ return locodedb.ContinentAfrica
+ case "Asia":
+ return locodedb.ContinentAsia
+ case "Europe":
+ return locodedb.ContinentEurope
+ case "North America":
+ return locodedb.ContinentNorthAmerica
+ case "South America":
+ return locodedb.ContinentSouthAmerica
+ case "Antarctica":
+ return locodedb.ContinentAntarctica
+ case "Australia", "Oceania":
+ return locodedb.ContinentOceania
+ }
+}
diff --git a/pkg/util/locode/db/continents/geojson/db.go b/pkg/util/locode/db/continents/geojson/db.go
new file mode 100644
index 000000000..ee43bd810
--- /dev/null
+++ b/pkg/util/locode/db/continents/geojson/db.go
@@ -0,0 +1,63 @@
+package continentsdb
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/paulmach/orb/geojson"
+)
+
+// Prm groups the required parameters of the DB's constructor.
+//
+// All values must comply with the requirements imposed on them.
+// Passing incorrect parameter values will result in constructor
+// failure (error or panic depending on the implementation).
+type Prm struct {
+ // Path to polygons of Earth's continents in GeoJSON format.
+ //
+ // Must not be empty.
+ Path string
+}
+
+// DB is a descriptor of the Earth's polygons in GeoJSON format.
+//
+// For correct operation, DB must be created
+// using the constructor (New) based on the required parameters
+// and optional components. After successful creation,
+// the DB is immediately ready to work through API.
+type DB struct {
+ path string
+
+ once sync.Once
+
+ features []*geojson.Feature
+}
+
+const invalidPrmValFmt = "invalid parameter %s (%T):%v"
+
+func panicOnPrmValue(n string, v any) {
+ panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
+}
+
+// New creates a new instance of the DB.
+//
+// Panics if at least one value of the parameters is invalid.
+//
+// The created DB does not require additional
+// initialization and is completely ready for work.
+func New(prm Prm, opts ...Option) *DB {
+ switch {
+ case prm.Path == "":
+ panicOnPrmValue("Path", prm.Path)
+ }
+
+ o := defaultOpts()
+
+ for i := range opts {
+ opts[i](o)
+ }
+
+ return &DB{
+ path: prm.Path,
+ }
+}
diff --git a/pkg/util/locode/db/continents/geojson/opts.go b/pkg/util/locode/db/continents/geojson/opts.go
new file mode 100644
index 000000000..59831fcc5
--- /dev/null
+++ b/pkg/util/locode/db/continents/geojson/opts.go
@@ -0,0 +1,10 @@
+package continentsdb
+
+// Option sets an optional parameter of DB.
+type Option func(*options)
+
+type options struct{}
+
+func defaultOpts() *options {
+ return &options{}
+}
diff --git a/pkg/util/locode/db/country.go b/pkg/util/locode/db/country.go
new file mode 100644
index 000000000..2d13c6ef9
--- /dev/null
+++ b/pkg/util/locode/db/country.go
@@ -0,0 +1,32 @@
+package locodedb
+
+import (
+ "fmt"
+
+ locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
+)
+
+// CountryCode represents a country code for
+// the storage in the FrostFS location database.
+type CountryCode locodecolumn.CountryCode
+
+// CountryCodeFromString parses a string UN/LOCODE country code
+// and returns a CountryCode.
+func CountryCodeFromString(s string) (*CountryCode, error) {
+ cc, err := locodecolumn.CountryCodeFromString(s)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse country code: %w", err)
+ }
+
+ return CountryFromColumn(cc)
+}
+
+// CountryFromColumn converts a UN/LOCODE country code to a CountryCode.
+func CountryFromColumn(cc *locodecolumn.CountryCode) (*CountryCode, error) {
+ return (*CountryCode)(cc), nil
+}
+
+func (c *CountryCode) String() string {
+ syms := (*locodecolumn.CountryCode)(c).Symbols()
+ return string(syms[:])
+}
diff --git a/pkg/util/locode/db/db.go b/pkg/util/locode/db/db.go
new file mode 100644
index 000000000..8c71ea794
--- /dev/null
+++ b/pkg/util/locode/db/db.go
@@ -0,0 +1,183 @@
+package locodedb
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
+ "golang.org/x/sync/errgroup"
+)
+
+// SourceTable is an interface of the UN/LOCODE table.
+type SourceTable interface {
+ // Must iterate over all entries of the table
+ // and pass next entry to the handler.
+ //
+ // Must return handler's errors directly.
+ IterateAll(func(locode.Record) error) error
+}
+
+// DB is an interface of FrostFS location database.
+type DB interface {
+ // Must save the record by key in the database.
+ Put(Key, Record) error
+
+ // Must return the record by key from the database.
+ Get(Key) (*Record, error)
+}
+
+// AirportRecord represents the entry in FrostFS airport database.
+type AirportRecord struct {
+ // Name of the country where airport is located.
+ CountryName string
+
+ // Geo point where airport is located.
+ Point *Point
+}
+
+// ErrAirportNotFound is returned by AirportRecord readers
+// when the required airport is not found.
+var ErrAirportNotFound = errors.New("airport not found")
+
+// AirportDB is an interface of FrostFS airport database.
+type AirportDB interface {
+ // Must return the record by UN/LOCODE table record.
+ //
+ // Must return ErrAirportNotFound if there is no
+ // related airport in the database.
+ Get(locode.Record) (*AirportRecord, error)
+}
+
+// ContinentsDB is an interface of FrostFS continent database.
+type ContinentsDB interface {
+ // Must return continent of the geo point.
+ PointContinent(*Point) (*Continent, error)
+}
+
+var ErrSubDivNotFound = errors.New("subdivision not found")
+
+var ErrCountryNotFound = errors.New("country not found")
+
+// NamesDB is an interface of the FrostFS location namespace.
+type NamesDB interface {
+ // Must resolve a country code to a country name.
+ //
+ // Must return ErrCountryNotFound if there is no
+ // country with the provided code.
+ CountryName(*CountryCode) (string, error)
+
+ // Must resolve (country code, subdivision code) to
+ // a subdivision name.
+ //
+ // Must return ErrSubDivNotFound if either country or
+ // subdivision is not presented in database.
+ SubDivName(*CountryCode, string) (string, error)
+}
+
+// FillDatabase generates the FrostFS location database based on the UN/LOCODE table.
+func FillDatabase(table SourceTable, airports AirportDB, continents ContinentsDB, names NamesDB, db DB) error {
+ var errG errgroup.Group
+
+ // Pick some sane default, after this the performance stopped increasing.
+ errG.SetLimit(runtime.NumCPU() * 4)
+ _ = table.IterateAll(func(tableRecord locode.Record) error {
+ errG.Go(func() error {
+ return processTableRecord(tableRecord, airports, continents, names, db)
+ })
+ return nil
+ })
+ return errG.Wait()
+}
+
+func processTableRecord(tableRecord locode.Record, airports AirportDB, continents ContinentsDB, names NamesDB, db DB) error {
+ if tableRecord.LOCODE.LocationCode() == "" {
+ return nil
+ }
+
+ dbKey, err := NewKey(tableRecord.LOCODE)
+ if err != nil {
+ return err
+ }
+
+ dbRecord, err := NewRecord(tableRecord)
+ if err != nil {
+ if errors.Is(err, errParseCoordinates) {
+ return nil
+ }
+
+ return err
+ }
+
+ geoPoint := dbRecord.GeoPoint()
+ countryName := ""
+
+ if geoPoint == nil {
+ airportRecord, err := airports.Get(tableRecord)
+ if err != nil {
+ if errors.Is(err, ErrAirportNotFound) {
+ return nil
+ }
+
+ return err
+ }
+
+ geoPoint = airportRecord.Point
+ countryName = airportRecord.CountryName
+ }
+
+ dbRecord.SetGeoPoint(geoPoint)
+
+ if countryName == "" {
+ countryName, err = names.CountryName(dbKey.CountryCode())
+ if err != nil {
+ if errors.Is(err, ErrCountryNotFound) {
+ return nil
+ }
+
+ return err
+ }
+ }
+
+ dbRecord.SetCountryName(countryName)
+
+ if subDivCode := dbRecord.SubDivCode(); subDivCode != "" {
+ subDivName, err := names.SubDivName(dbKey.CountryCode(), subDivCode)
+ if err != nil {
+ if errors.Is(err, ErrSubDivNotFound) {
+ return nil
+ }
+
+ return err
+ }
+
+ dbRecord.SetSubDivName(subDivName)
+ }
+
+ continent, err := continents.PointContinent(geoPoint)
+ if err != nil {
+ return fmt.Errorf("could not calculate continent geo point: %w", err)
+ } else if continent.Is(ContinentUnknown) {
+ return nil
+ }
+
+ dbRecord.SetContinent(continent)
+
+ return db.Put(*dbKey, *dbRecord)
+}
+
+// LocodeRecord returns the record from the FrostFS location database
+// corresponding to the string representation of UN/LOCODE.
+func LocodeRecord(db DB, sLocode string) (*Record, error) {
+ lc, err := locode.FromString(sLocode)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse locode: %w", err)
+ }
+
+ key, err := NewKey(*lc)
+ if err != nil {
+ return nil, err
+ }
+
+ return db.Get(*key)
+}
diff --git a/pkg/util/locode/db/location.go b/pkg/util/locode/db/location.go
new file mode 100644
index 000000000..d22979170
--- /dev/null
+++ b/pkg/util/locode/db/location.go
@@ -0,0 +1,32 @@
+package locodedb
+
+import (
+ "fmt"
+
+ locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
+)
+
+// LocationCode represents a location code for
+// the storage in the FrostFS location database.
+type LocationCode locodecolumn.LocationCode
+
+// LocationCodeFromString parses a string UN/LOCODE location code
+// and returns a LocationCode.
+func LocationCodeFromString(s string) (*LocationCode, error) {
+ lc, err := locodecolumn.LocationCodeFromString(s)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse location code: %w", err)
+ }
+
+ return LocationFromColumn(lc)
+}
+
+// LocationFromColumn converts a UN/LOCODE location code to a LocationCode.
+func LocationFromColumn(cc *locodecolumn.LocationCode) (*LocationCode, error) {
+ return (*LocationCode)(cc), nil
+}
+
+func (l *LocationCode) String() string {
+ syms := (*locodecolumn.LocationCode)(l).Symbols()
+ return string(syms[:])
+}
diff --git a/pkg/util/locode/db/point.go b/pkg/util/locode/db/point.go
new file mode 100644
index 000000000..72daebb2c
--- /dev/null
+++ b/pkg/util/locode/db/point.go
@@ -0,0 +1,93 @@
+package locodedb
+
+import (
+ "fmt"
+ "strconv"
+
+ locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
+)
+
+// Point represents a 2D geographic point.
+type Point struct {
+ lat, lng float64
+}
+
+// NewPoint creates, initializes and returns a new Point.
+func NewPoint(lat, lng float64) *Point {
+ return &Point{
+ lat: lat,
+ lng: lng,
+ }
+}
+
+// Latitude returns the Point's latitude.
+func (p Point) Latitude() float64 {
+ return p.lat
+}
+
+// Longitude returns the Point's longitude.
+func (p Point) Longitude() float64 {
+ return p.lng
+}
+
+// PointFromCoordinates converts a UN/LOCODE coordinates to a Point.
+func PointFromCoordinates(crd *locodecolumn.Coordinates) (*Point, error) {
+ if crd == nil {
+ return nil, nil
+ }
+
+ cLat := crd.Latitude()
+ cLatDeg := cLat.Degrees()
+ cLatMnt := cLat.Minutes()
+
+ lat, err := toDecimal(cLatDeg[:], cLatMnt[:])
+ if err != nil {
+ return nil, fmt.Errorf("could not parse latitude: %w", err)
+ }
+
+ if !cLat.Hemisphere().North() {
+ lat = -lat
+ }
+
+ cLng := crd.Longitude()
+ cLngDeg := cLng.Degrees()
+ cLngMnt := cLng.Minutes()
+
+ lng, err := toDecimal(cLngDeg[:], cLngMnt[:])
+ if err != nil {
+ return nil, fmt.Errorf("could not parse longitude: %w", err)
+ }
+
+ if !cLng.Hemisphere().East() {
+ lng = -lng
+ }
+
+ return &Point{
+ lat: lat,
+ lng: lng,
+ }, nil
+}
+
+func toDecimal(intRaw, minutesRaw []byte) (float64, error) {
+ integer, err := strconv.ParseFloat(string(intRaw), 64)
+ if err != nil {
+ return 0, fmt.Errorf("could not parse integer part: %w", err)
+ }
+
+ decimal, err := minutesToDegrees(minutesRaw)
+ if err != nil {
+ return 0, fmt.Errorf("could not parse decimal part: %w", err)
+ }
+
+ return integer + decimal, nil
+}
+
+// minutesToDegrees converts minutes to decimal part of a degree.
+func minutesToDegrees(raw []byte) (float64, error) {
+ minutes, err := strconv.ParseFloat(string(raw), 64)
+ if err != nil {
+ return 0, err
+ }
+
+ return minutes / 60, nil
+}
diff --git a/pkg/util/locode/db/point_test.go b/pkg/util/locode/db/point_test.go
new file mode 100644
index 000000000..f91c0cf87
--- /dev/null
+++ b/pkg/util/locode/db/point_test.go
@@ -0,0 +1,51 @@
+package locodedb
+
+import (
+ "testing"
+
+ locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPointFromCoordinates(t *testing.T) {
+ testCases := []struct {
+ latGot, longGot string
+ latWant, longWant float64
+ }{
+ {
+ latGot: "5915N",
+ longGot: "01806E",
+ latWant: 59.25,
+ longWant: 18.10,
+ },
+ {
+ latGot: "1000N",
+ longGot: "02030E",
+ latWant: 10.00,
+ longWant: 20.50,
+ },
+ {
+ latGot: "0145S",
+ longGot: "03512W",
+ latWant: -01.75,
+ longWant: -35.20,
+ },
+ }
+
+ var (
+ crd *locodecolumn.Coordinates
+ point *Point
+ err error
+ )
+
+ for _, test := range testCases {
+ crd, err = locodecolumn.CoordinatesFromString(test.latGot + " " + test.longGot)
+ require.NoError(t, err)
+
+ point, err = PointFromCoordinates(crd)
+ require.NoError(t, err)
+
+ require.Equal(t, test.latWant, point.Latitude())
+ require.Equal(t, test.longWant, point.Longitude())
+ }
+}
diff --git a/pkg/util/locode/db/record.go b/pkg/util/locode/db/record.go
new file mode 100644
index 000000000..4c414079f
--- /dev/null
+++ b/pkg/util/locode/db/record.go
@@ -0,0 +1,140 @@
+package locodedb
+
+import (
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
+ locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
+)
+
+// Key represents the key in FrostFS location database.
+type Key struct {
+ cc *CountryCode
+
+ lc *LocationCode
+}
+
+// NewKey calculates Key from LOCODE.
+func NewKey(lc locode.LOCODE) (*Key, error) {
+ country, err := CountryCodeFromString(lc.CountryCode())
+ if err != nil {
+ return nil, fmt.Errorf("could not parse country: %w", err)
+ }
+
+ location, err := LocationCodeFromString(lc.LocationCode())
+ if err != nil {
+ return nil, fmt.Errorf("could not parse location: %w", err)
+ }
+
+ return &Key{
+ cc: country,
+ lc: location,
+ }, nil
+}
+
+// CountryCode returns the location's country code.
+func (k *Key) CountryCode() *CountryCode {
+ return k.cc
+}
+
+// LocationCode returns the location code.
+func (k *Key) LocationCode() *LocationCode {
+ return k.lc
+}
+
+// Record represents the entry in FrostFS location database.
+type Record struct {
+ countryName string
+
+ locationName string
+
+ subDivName string
+
+ subDivCode string
+
+ p *Point
+
+ cont *Continent
+}
+
+var errParseCoordinates = errors.New("invalid coordinates")
+
+// NewRecord calculates the Record from the UN/LOCODE table record.
+func NewRecord(r locode.Record) (*Record, error) {
+ crd, err := locodecolumn.CoordinatesFromString(r.Coordinates)
+ if err != nil {
+ return nil, fmt.Errorf("%w: %v", errParseCoordinates, err)
+ }
+
+ point, err := PointFromCoordinates(crd)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse geo point: %w", err)
+ }
+
+ return &Record{
+ locationName: r.NameWoDiacritics,
+ subDivCode: r.SubDiv,
+ p: point,
+ }, nil
+}
+
+// CountryName returns the country name.
+func (r *Record) CountryName() string {
+ return r.countryName
+}
+
+// SetCountryName sets the country name.
+func (r *Record) SetCountryName(name string) {
+ r.countryName = name
+}
+
+// LocationName returns the location name.
+func (r *Record) LocationName() string {
+ return r.locationName
+}
+
+// SetLocationName sets the location name.
+func (r *Record) SetLocationName(name string) {
+ r.locationName = name
+}
+
+// SubDivCode returns the subdivision code.
+func (r *Record) SubDivCode() string {
+ return r.subDivCode
+}
+
+// SetSubDivCode sets the subdivision code.
+func (r *Record) SetSubDivCode(name string) {
+ r.subDivCode = name
+}
+
+// SubDivName returns the subdivision name.
+func (r *Record) SubDivName() string {
+ return r.subDivName
+}
+
+// SetSubDivName sets the subdivision name.
+func (r *Record) SetSubDivName(name string) {
+ r.subDivName = name
+}
+
+// GeoPoint returns geo point of the location.
+func (r *Record) GeoPoint() *Point {
+ return r.p
+}
+
+// SetGeoPoint sets geo point of the location.
+func (r *Record) SetGeoPoint(p *Point) {
+ r.p = p
+}
+
+// Continent returns the location continent.
+func (r *Record) Continent() *Continent {
+ return r.cont
+}
+
+// SetContinent sets the location continent.
+func (r *Record) SetContinent(c *Continent) {
+ r.cont = c
+}
diff --git a/pkg/util/locode/record.go b/pkg/util/locode/record.go
new file mode 100644
index 000000000..7db746ff3
--- /dev/null
+++ b/pkg/util/locode/record.go
@@ -0,0 +1,83 @@
+package locode
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// LOCODE represents code from UN/LOCODE coding scheme.
+type LOCODE [2]string
+
+// Record represents a single record of the UN/LOCODE table.
+type Record struct {
+ // Change Indicator.
+ Ch string
+
+ // Combination of a 2-character country code and a 3-character location code.
+ LOCODE LOCODE
+
+	// Name of the location which has been allocated a UN/LOCODE.
+ Name string
+
+ // Names of the locations which have been allocated a UN/LOCODE without diacritic signs.
+ NameWoDiacritics string
+
+ // ISO 1-3 character alphabetic and/or numeric code for the administrative division of the country concerned.
+ SubDiv string
+
+ // 8-digit function classifier code for the location.
+ Function string
+
+ // Status of the entry by a 2-character code.
+ Status string
+
+ // Last date when the location was updated/entered.
+ Date string
+
+ // The IATA code for the location if different from location code in column LOCODE.
+ IATA string
+
+ // Geographical coordinates (latitude/longitude) of the location, if there is any.
+ Coordinates string
+
+ // Some general remarks regarding the UN/LOCODE in question.
+ Remarks string
+}
+
+// ErrInvalidString is the error of incorrect string format of the LOCODE.
+var ErrInvalidString = errors.New("invalid string format in UN/Locode")
+
+// FromString parses string and returns LOCODE.
+//
+// If the string has an incorrect format, ErrInvalidString is returned.
+func FromString(s string) (*LOCODE, error) {
+ const (
+ locationSeparator = " "
+ locodePartsNumber = 2
+ )
+
+ words := strings.Split(s, locationSeparator)
+ if ln := len(words); ln != locodePartsNumber {
+ return nil, fmt.Errorf(
+			"incorrect locode: it must consist of %d codes separated with a whitespace, got: %d",
+ locodePartsNumber,
+ ln,
+ )
+ }
+
+ l := new(LOCODE)
+ copy(l[:], words)
+
+ return l, nil
+}
+
+// CountryCode returns a string representation of country code.
+func (l *LOCODE) CountryCode() string {
+ return l[0]
+}
+
+// LocationCode returns a string representation of location code.
+func (l *LOCODE) LocationCode() string {
+ return l[1]
+}
diff --git a/pkg/util/locode/table/csv/calls.go b/pkg/util/locode/table/csv/calls.go
new file mode 100644
index 000000000..5f40865be
--- /dev/null
+++ b/pkg/util/locode/table/csv/calls.go
@@ -0,0 +1,156 @@
+package csvlocode
+
+import (
+ "encoding/csv"
+ "errors"
+ "io"
+ "os"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
+)
+
+var errInvalidRecord = errors.New("invalid table record")
+
+// IterateAll scans a table record one-by-one, parses a UN/LOCODE record
+// from it and passes it to f.
+//
+// Returns f's errors directly.
+func (t *Table) IterateAll(f func(locode.Record) error) error {
+ const wordsPerRecord = 12
+
+ return t.scanWords(t.paths, wordsPerRecord, func(words []string) error {
+ lc, err := locode.FromString(strings.Join(words[1:3], " "))
+ if err != nil {
+ return err
+ }
+
+ record := locode.Record{
+ Ch: words[0],
+ LOCODE: *lc,
+ Name: words[3],
+ NameWoDiacritics: words[4],
+ SubDiv: words[5],
+ Function: words[6],
+ Status: words[7],
+ Date: words[8],
+ IATA: words[9],
+ Coordinates: words[10],
+ Remarks: words[11],
+ }
+
+ return f(record)
+ })
+}
+
+const (
+ _ = iota - 1
+
+ subDivCountry
+ subDivSubdivision
+ subDivName
+ _ // subDivLevel
+
+ subDivFldNum
+)
+
+type subDivKey struct {
+ countryCode,
+ subDivCode string
+}
+
+type subDivRecord struct {
+ name string
+}
+
+// SubDivName scans the table records into an in-memory map (once),
+// and returns the subdivision name if the country and subdivision codes match.
+//
+// Returns locodedb.ErrSubDivNotFound if no entry matches.
+func (t *Table) SubDivName(countryCode *locodedb.CountryCode, code string) (string, error) {
+ if err := t.initSubDiv(); err != nil {
+ return "", err
+ }
+
+ rec, ok := t.mSubDiv[subDivKey{
+ countryCode: countryCode.String(),
+ subDivCode: code,
+ }]
+ if !ok {
+ return "", locodedb.ErrSubDivNotFound
+ }
+
+ return rec.name, nil
+}
+
+func (t *Table) initSubDiv() (err error) {
+ t.subDivOnce.Do(func() {
+ t.mSubDiv = make(map[subDivKey]subDivRecord)
+
+ err = t.scanWords([]string{t.subDivPath}, subDivFldNum, func(words []string) error {
+ t.mSubDiv[subDivKey{
+ countryCode: words[subDivCountry],
+ subDivCode: words[subDivSubdivision],
+ }] = subDivRecord{
+ name: words[subDivName],
+ }
+
+ return nil
+ })
+ })
+
+ return
+}
+
+var errScanInt = errors.New("interrupt scan")
+
+func (t *Table) scanWords(paths []string, fpr int, wordsHandler func([]string) error) error {
+ var (
+ rdrs = make([]io.Reader, 0, len(t.paths))
+ closers = make([]io.Closer, 0, len(t.paths))
+ )
+
+ for i := range paths {
+ file, err := os.OpenFile(paths[i], os.O_RDONLY, t.mode)
+ if err != nil {
+ return err
+ }
+
+ rdrs = append(rdrs, file)
+ closers = append(closers, file)
+ }
+
+ defer func() {
+ for i := range closers {
+ _ = closers[i].Close()
+ }
+ }()
+
+ r := csv.NewReader(io.MultiReader(rdrs...))
+ r.ReuseRecord = true
+ r.FieldsPerRecord = fpr
+
+ for {
+ words, err := r.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ return err
+ } else if len(words) != fpr {
+ return errInvalidRecord
+ }
+
+ if err := wordsHandler(words); err != nil {
+ if errors.Is(err, errScanInt) {
+ break
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/util/locode/table/csv/opts.go b/pkg/util/locode/table/csv/opts.go
new file mode 100644
index 000000000..5aaffd7c1
--- /dev/null
+++ b/pkg/util/locode/table/csv/opts.go
@@ -0,0 +1,28 @@
+package csvlocode
+
+import (
+ "io/fs"
+)
+
+// Option sets an optional parameter of Table.
+type Option func(*options)
+
+type options struct {
+ mode fs.FileMode
+
+ extraPaths []string
+}
+
+func defaultOpts() *options {
+ return &options{
+ mode: 0700,
+ }
+}
+
+// WithExtraPaths returns an option to add extra paths
+// to UN/LOCODE tables in csv format.
+func WithExtraPaths(ps ...string) Option {
+ return func(o *options) {
+ o.extraPaths = append(o.extraPaths, ps...)
+ }
+}
diff --git a/pkg/util/locode/table/csv/table.go b/pkg/util/locode/table/csv/table.go
new file mode 100644
index 000000000..b84c2b705
--- /dev/null
+++ b/pkg/util/locode/table/csv/table.go
@@ -0,0 +1,75 @@
+package csvlocode
+
+import (
+ "fmt"
+ "io/fs"
+ "sync"
+)
+
+// Prm groups the required parameters of the Table's constructor.
+//
+// All values must comply with the requirements imposed on them.
+// Passing incorrect parameter values will result in constructor
+// failure (error or panic depending on the implementation).
+type Prm struct {
+ // Path to UN/LOCODE csv table.
+ //
+ // Must not be empty.
+ Path string
+
+ // Path to csv table of UN/LOCODE Subdivisions.
+ //
+ // Must not be empty.
+ SubDivPath string
+}
+
+// Table is a descriptor of the UN/LOCODE table in csv format.
+//
+// For correct operation, Table must be created
+// using the constructor (New) based on the required parameters
+// and optional components. After successful creation,
+// the Table is immediately ready to work through API.
+type Table struct {
+ paths []string
+
+ mode fs.FileMode
+
+ subDivPath string
+
+ subDivOnce sync.Once
+
+ mSubDiv map[subDivKey]subDivRecord
+}
+
+const invalidPrmValFmt = "invalid parameter %s (%T):%v"
+
+func panicOnPrmValue(n string, v any) {
+ panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
+}
+
+// New creates a new instance of the Table.
+//
+// Panics if at least one value of the parameters is invalid.
+//
+// The created Table does not require additional
+// initialization and is completely ready for work.
+func New(prm Prm, opts ...Option) *Table {
+ switch {
+ case prm.Path == "":
+ panicOnPrmValue("Path", prm.Path)
+ case prm.SubDivPath == "":
+ panicOnPrmValue("SubDivPath", prm.SubDivPath)
+ }
+
+ o := defaultOpts()
+
+ for i := range opts {
+ opts[i](o)
+ }
+
+ return &Table{
+ paths: append(o.extraPaths, prm.Path),
+ mode: o.mode,
+ subDivPath: prm.SubDivPath,
+ }
+}
diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go
deleted file mode 100644
index 413b1d9aa..000000000
--- a/pkg/util/logger/log.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package logger
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
- qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
- "go.uber.org/zap"
-)
-
-func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Debug(msg, appendContext(ctx, fields...)...)
-}
-
-func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Info(msg, appendContext(ctx, fields...)...)
-}
-
-func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Warn(msg, appendContext(ctx, fields...)...)
-}
-
-func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) {
- l.z.Error(msg, appendContext(ctx, fields...)...)
-}
-
-func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field {
- if traceID := tracing.GetTraceID(ctx); traceID != "" {
- fields = append(fields, zap.String("trace_id", traceID))
- }
- if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined {
- fields = append(fields, zap.String("io_tag", ioTag))
- }
- return fields
-}
diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go
index a1998cb1a..fcac09321 100644
--- a/pkg/util/logger/logger.go
+++ b/pkg/util/logger/logger.go
@@ -1,11 +1,6 @@
package logger
import (
- "fmt"
- "time"
-
- "git.frostfs.info/TrueCloudLab/zapjournald"
- "github.com/ssgreg/journald"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
@@ -13,10 +8,8 @@ import (
// Logger represents a component
// for writing messages to log.
type Logger struct {
- z *zap.Logger
- c zapcore.Core
- t Tag
- w bool
+ *zap.Logger
+ lvl zap.AtomicLevel
}
// Prm groups Logger's parameters.
@@ -25,33 +18,25 @@ type Logger struct {
// Parameters that have been connected to the Logger support its
// configuration changing.
//
-// See also Logger.Reload, SetLevelString.
+// Passing a Prm to NewLogger connects that Prm to the newly created
+// instance of the Logger.
+//
+// See also Reload, SetLevelString.
type Prm struct {
+ // link to the created Logger
+ // instance; used for a runtime
+ // reconfiguration
+ _log *Logger
+
// support runtime rereading
level zapcore.Level
- // SamplingHook hook for the zap.Logger
- SamplingHook func(e zapcore.Entry, sd zapcore.SamplingDecision)
+ // MetricsNamespace is the namespace string used for log counter metrics
+ MetricsNamespace string
// do not support runtime rereading
- dest string
-
- // PrependTimestamp specifies whether to prepend a timestamp in the log
- PrependTimestamp bool
-
- // Options for zap.Logger
- Options []zap.Option
-
- // map of tag's bit masks to log level, overrides lvl
- tl map[Tag]zapcore.Level
}
-const (
- DestinationUndefined = ""
- DestinationStdout = "stdout"
- DestinationJournald = "journald"
-)
-
// SetLevelString sets the minimum logging level. Default is
// "info".
//
@@ -63,20 +48,20 @@ func (p *Prm) SetLevelString(s string) error {
return p.level.UnmarshalText([]byte(s))
}
-func (p *Prm) SetDestination(d string) error {
- if d != DestinationStdout && d != DestinationJournald {
- return fmt.Errorf("invalid logger destination %s", d)
+// Reload reloads configuration of a connected instance of the Logger.
+// Panics if no connection has been performed.
+// Returns any reconfiguration error from the Logger directly.
+func (p Prm) Reload() error {
+ if p._log == nil {
+ // incorrect logger usage
+ panic("parameters are not connected to any Logger")
}
- if p != nil {
- p.dest = d
- }
- return nil
+
+ return p._log.reload(p)
}
-// SetTags parses list of tags with log level.
-func (p *Prm) SetTags(tags [][]string) (err error) {
- p.tl, err = parseTags(tags)
- return err
+func defaultPrm() *Prm {
+ return new(Prm)
}
// NewLogger constructs a new zap logger instance. Constructing with nil
@@ -90,154 +75,37 @@ func (p *Prm) SetTags(tags [][]string) (err error) {
// - ISO8601 time encoding.
//
// Logger records a stack trace for all messages at or above fatal level.
-func NewLogger(prm Prm) (*Logger, error) {
- switch prm.dest {
- case DestinationUndefined, DestinationStdout:
- return newConsoleLogger(prm)
- case DestinationJournald:
- return newJournaldLogger(prm)
- default:
- return nil, fmt.Errorf("unknown destination %s", prm.dest)
+func NewLogger(prm *Prm) (*Logger, error) {
+ if prm == nil {
+ prm = defaultPrm()
}
-}
-func newConsoleLogger(prm Prm) (*Logger, error) {
+ lvl := zap.NewAtomicLevelAt(prm.level)
+
+ m := newLogMetrics(prm.MetricsNamespace)
+
c := zap.NewProductionConfig()
- c.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
+ c.Level = lvl
c.Encoding = "console"
- if prm.SamplingHook != nil {
- c.Sampling.Hook = prm.SamplingHook
+ c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ c.Sampling.Hook = func(e zapcore.Entry, sd zapcore.SamplingDecision) {
+ m.Inc(e.Level, sd == zapcore.LogDropped)
}
- if prm.PrependTimestamp {
- c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
- } else {
- c.EncoderConfig.TimeKey = ""
- }
-
- opts := []zap.Option{
+ lZap, err := c.Build(
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
- zap.AddCallerSkip(1),
- }
- opts = append(opts, prm.Options...)
- lZap, err := c.Build(opts...)
+ )
if err != nil {
return nil, err
}
- l := &Logger{z: lZap, c: lZap.Core()}
- l = l.WithTag(TagMain)
+
+ l := &Logger{Logger: lZap, lvl: lvl}
+ prm._log = l
return l, nil
}
-func newJournaldLogger(prm Prm) (*Logger, error) {
- c := zap.NewProductionConfig()
- if prm.SamplingHook != nil {
- c.Sampling.Hook = prm.SamplingHook
- }
-
- if prm.PrependTimestamp {
- c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
- } else {
- c.EncoderConfig.TimeKey = ""
- }
-
- encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
-
- core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields)
- coreWithContext := core.With([]zapcore.Field{
- zapjournald.SyslogFacility(zapjournald.LogDaemon),
- zapjournald.SyslogIdentifier(),
- zapjournald.SyslogPid(),
- })
-
- var samplerOpts []zapcore.SamplerOption
- if c.Sampling.Hook != nil {
- samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook))
- }
- samplingCore := zapcore.NewSamplerWithOptions(
- coreWithContext,
- time.Second,
- c.Sampling.Initial,
- c.Sampling.Thereafter,
- samplerOpts...,
- )
- opts := []zap.Option{
- zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
- zap.AddCallerSkip(1),
- }
- opts = append(opts, prm.Options...)
- lZap := zap.New(samplingCore, opts...)
- l := &Logger{z: lZap, c: lZap.Core()}
- l = l.WithTag(TagMain)
-
- return l, nil
-}
-
-// With create a child logger with new fields, don't affect the parent.
-// Throws panic if tag is unset.
-func (l *Logger) With(fields ...zap.Field) *Logger {
- if l.t == 0 {
- panic("tag is unset")
- }
- c := *l
- c.z = l.z.With(fields...)
- // With called under the logger
- c.w = true
- return &c
-}
-
-type core struct {
- c zapcore.Core
- l zap.AtomicLevel
-}
-
-func (c *core) Enabled(lvl zapcore.Level) bool {
- return c.l.Enabled(lvl)
-}
-
-func (c *core) With(fields []zapcore.Field) zapcore.Core {
- clone := *c
- clone.c = clone.c.With(fields)
- return &clone
-}
-
-func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
- return c.c.Check(e, ce)
-}
-
-func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error {
- return c.c.Write(e, fields)
-}
-
-func (c *core) Sync() error {
- return c.c.Sync()
-}
-
-// WithTag is an equivalent of calling [NewLogger] with the same parameters for the current logger.
-// Throws panic if provided unsupported tag.
-func (l *Logger) WithTag(tag Tag) *Logger {
- if tag == 0 || tag > Tag(len(_Tag_index)-1) {
- panic("unsupported tag " + tag.String())
- }
- if l.w {
- panic("unsupported operation for the logger's state")
- }
- c := *l
- c.t = tag
- c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core {
- return &core{
- c: l.c.With([]zap.Field{zap.String("tag", tag.String())}),
- l: tagToLogLevel[tag],
- }
- }))
- return &c
-}
-
-func NewLoggerWrapper(z *zap.Logger) *Logger {
- return &Logger{
- z: z.WithOptions(zap.AddCallerSkip(1)),
- t: TagMain,
- c: z.Core(),
- }
+func (l *Logger) reload(prm Prm) error {
+ l.lvl.SetLevel(prm.level)
+ return nil
}
diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go
deleted file mode 100644
index b867ee6cc..000000000
--- a/pkg/util/logger/logger_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package logger
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/require"
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zaptest/observer"
-)
-
-func BenchmarkLogger(b *testing.B) {
- ctx := context.Background()
- m := map[string]Prm{}
-
- prm := Prm{}
- require.NoError(b, prm.SetLevelString("debug"))
- m["logging enabled"] = prm
-
- prm = Prm{}
- require.NoError(b, prm.SetLevelString("error"))
- m["logging disabled"] = prm
-
- prm = Prm{}
- require.NoError(b, prm.SetLevelString("error"))
- require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}}))
- m["logging enabled via tags"] = prm
-
- prm = Prm{}
- require.NoError(b, prm.SetLevelString("debug"))
- require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}}))
- m["logging disabled via tags"] = prm
-
- for k, v := range m {
- b.Run(k, func(b *testing.B) {
- logger, err := createLogger(v)
- require.NoError(b, err)
- UpdateLevelForTags(v)
- b.ResetTimer()
- b.ReportAllocs()
- for range b.N {
- logger.Info(ctx, "test info")
- }
- })
- }
-}
-
-type testCore struct {
- core zapcore.Core
-}
-
-func (c *testCore) Enabled(lvl zapcore.Level) bool {
- return c.core.Enabled(lvl)
-}
-
-func (c *testCore) With(fields []zapcore.Field) zapcore.Core {
- c.core = c.core.With(fields)
- return c
-}
-
-func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
- return ce.AddCore(e, c)
-}
-
-func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error {
- return nil
-}
-
-func (c *testCore) Sync() error {
- return c.core.Sync()
-}
-
-func createLogger(prm Prm) (*Logger, error) {
- prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
- tc := testCore{core: core}
- return &tc
- })}
- return NewLogger(prm)
-}
-
-func TestLoggerOutput(t *testing.T) {
- obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel))
-
- prm := Prm{}
- require.NoError(t, prm.SetLevelString("debug"))
- prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core {
- return obs
- })}
- loggerMain, err := NewLogger(prm)
- require.NoError(t, err)
- UpdateLevelForTags(prm)
-
- loggerMainWith := loggerMain.With(zap.String("key", "value"))
-
- require.Panics(t, func() {
- loggerMainWith.WithTag(TagShard)
- })
- loggerShard := loggerMain.WithTag(TagShard)
- loggerShard = loggerShard.With(zap.String("key1", "value1"))
-
- loggerMorph := loggerMain.WithTag(TagMorph)
- loggerMorph = loggerMorph.With(zap.String("key2", "value2"))
-
- ctx := context.Background()
- loggerMain.Debug(ctx, "main")
- loggerMainWith.Debug(ctx, "main with")
- loggerShard.Debug(ctx, "shard")
- loggerMorph.Debug(ctx, "morph")
-
- require.Len(t, logs.All(), 4)
- require.Len(t, logs.FilterFieldKey("key").All(), 1)
- require.Len(t, logs.FilterFieldKey("key1").All(), 1)
- require.Len(t, logs.FilterFieldKey("key2").All(), 1)
- require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2)
- require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1)
- require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1)
-}
diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result
deleted file mode 100644
index 612fa2967..000000000
--- a/pkg/util/logger/logger_test.result
+++ /dev/null
@@ -1,46 +0,0 @@
-goos: linux
-goarch: amd64
-pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger
-cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
-BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op
-PASS
-ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s
diff --git a/pkg/util/logger/metrics.go b/pkg/util/logger/metrics.go
index 7e62e6383..708583473 100644
--- a/pkg/util/logger/metrics.go
+++ b/pkg/util/logger/metrics.go
@@ -14,16 +14,11 @@ const (
logDroppedLabel = "dropped"
)
-type LogMetrics interface {
- Inc(level zapcore.Level, dropped bool)
- GetSamplingHook() func(e zapcore.Entry, sd zapcore.SamplingDecision)
-}
-
type logMetrics struct {
logCount *prometheus.CounterVec
}
-func NewLogMetrics(namespace string) LogMetrics {
+func newLogMetrics(namespace string) *logMetrics {
return &logMetrics{
logCount: metrics.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
@@ -40,9 +35,3 @@ func (m *logMetrics) Inc(level zapcore.Level, dropped bool) {
logDroppedLabel: strconv.FormatBool(dropped),
}).Inc()
}
-
-func (m *logMetrics) GetSamplingHook() func(zapcore.Entry, zapcore.SamplingDecision) {
- return func(e zapcore.Entry, sd zapcore.SamplingDecision) {
- m.Inc(e.Level, sd == zapcore.LogDropped)
- }
-}
diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go
deleted file mode 100644
index 1b98f2e62..000000000
--- a/pkg/util/logger/tag_string.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT.
-
-package logger
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[TagMain-1]
- _ = x[TagMorph-2]
- _ = x[TagGrpcSvc-3]
- _ = x[TagIr-4]
- _ = x[TagProcessor-5]
- _ = x[TagEngine-6]
- _ = x[TagBlobovnicza-7]
- _ = x[TagBlobovniczaTree-8]
- _ = x[TagBlobstor-9]
- _ = x[TagFSTree-10]
- _ = x[TagGC-11]
- _ = x[TagShard-12]
- _ = x[TagWriteCache-13]
- _ = x[TagDeleteSvc-14]
- _ = x[TagGetSvc-15]
- _ = x[TagSearchSvc-16]
- _ = x[TagSessionSvc-17]
- _ = x[TagTreeSvc-18]
- _ = x[TagPolicer-19]
- _ = x[TagReplicator-20]
-}
-
-const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator"
-
-var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148}
-
-func (i Tag) String() string {
- i -= 1
- if i >= Tag(len(_Tag_index)-1) {
- return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")"
- }
- return _Tag_name[_Tag_index[i]:_Tag_index[i+1]]
-}
diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go
deleted file mode 100644
index a5386707e..000000000
--- a/pkg/util/logger/tags.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package logger
-
-import (
- "fmt"
- "strings"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-//go:generate stringer -type Tag -linecomment
-
-type Tag uint8
-
-const (
- _ Tag = iota //
- TagMain // main
- TagMorph // morph
- TagGrpcSvc // grpcsvc
- TagIr // ir
- TagProcessor // processor
- TagEngine // engine
- TagBlobovnicza // blobovnicza
- TagBlobovniczaTree // blobovniczatree
- TagBlobstor // blobstor
- TagFSTree // fstree
- TagGC // gc
- TagShard // shard
- TagWriteCache // writecache
- TagDeleteSvc // deletesvc
- TagGetSvc // getsvc
- TagSearchSvc // searchsvc
- TagSessionSvc // sessionsvc
- TagTreeSvc // treesvc
- TagPolicer // policer
- TagReplicator // replicator
-
- defaultLevel = zapcore.InfoLevel
-)
-
-var (
- tagToLogLevel = map[Tag]zap.AtomicLevel{}
- stringToTag = map[string]Tag{}
-)
-
-func init() {
- for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ {
- tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel)
- stringToTag[i.String()] = i
- }
-}
-
-// parseTags returns:
-// - map(always instantiated) of tag to custom log level for that tag;
-// - error if it occurred(map is empty).
-func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) {
- m := make(map[Tag]zapcore.Level)
- if len(raw) == 0 {
- return m, nil
- }
- for _, item := range raw {
- str, level := item[0], item[1]
- if len(level) == 0 {
- // It is not necessary to parse tags without level,
- // because default log level will be used.
- continue
- }
- var l zapcore.Level
- err := l.UnmarshalText([]byte(level))
- if err != nil {
- return nil, err
- }
- tmp := strings.Split(str, ",")
- for _, tagStr := range tmp {
- tag, ok := stringToTag[strings.TrimSpace(tagStr)]
- if !ok {
- return nil, fmt.Errorf("unsupported tag %s", str)
- }
- m[tag] = l
- }
- }
- return m, nil
-}
-
-func UpdateLevelForTags(prm Prm) {
- for k, v := range tagToLogLevel {
- nk, ok := prm.tl[k]
- if ok {
- v.SetLevel(nk)
- } else {
- v.SetLevel(prm.level)
- }
- }
-}
diff --git a/pkg/util/logger/test/logger.go b/pkg/util/logger/test/logger.go
index b5b0a31eb..4a2870870 100644
--- a/pkg/util/logger/test/logger.go
+++ b/pkg/util/logger/test/logger.go
@@ -4,17 +4,26 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
- "go.uber.org/zap/zaptest"
)
// NewLogger creates a new logger.
-func NewLogger(t testing.TB) *logger.Logger {
- return logger.NewLoggerWrapper(
- zaptest.NewLogger(t,
- zaptest.Level(zapcore.DebugLevel),
- zaptest.WrapOptions(zap.Development(), zap.AddCaller()),
- ),
- )
+//
+// If debug is true, a development logger with colorized output is created.
+func NewLogger(t testing.TB, debug bool) *logger.Logger {
+ var l logger.Logger
+ l.Logger = zap.L()
+
+ if debug {
+ cfg := zap.NewDevelopmentConfig()
+ cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
+
+ log, err := cfg.Build()
+ require.NoError(t, err, "could not prepare logger")
+ l.Logger = log
+ }
+
+ return &l
}
diff --git a/pkg/util/os.go b/pkg/util/os.go
index 30e08a8c3..1c4c97806 100644
--- a/pkg/util/os.go
+++ b/pkg/util/os.go
@@ -6,5 +6,5 @@ import "os"
// but with +x for a user and a group. This makes the created
// dir openable regardless of the passed permissions.
func MkdirAllX(path string, perm os.FileMode) error {
- return os.MkdirAll(path, perm|0o110)
+ return os.MkdirAll(path, perm|0110)
}
diff --git a/pkg/util/rand/rand.go b/pkg/util/rand/rand.go
index a06296a07..97508f82a 100644
--- a/pkg/util/rand/rand.go
+++ b/pkg/util/rand/rand.go
@@ -13,7 +13,7 @@ func Uint64() uint64 {
return source.Uint64()
}
-// Uint32 returns a random uint32 value.
+// Uint32 returns a random uint32 value.
func Uint32() uint32 {
return source.Uint32()
}
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
deleted file mode 100644
index bd15d0e8f..000000000
--- a/pkg/util/sdnotify/sdnotify.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sdnotify
-
-import (
- "errors"
- "fmt"
- "net"
- "os"
- "strconv"
- "strings"
-
- "golang.org/x/sys/unix"
-)
-
-const (
- ReadyEnabled = "READY=1"
- StoppingEnabled = "STOPPING=1"
- ReloadingEnabled = "RELOADING=1"
-)
-
-var (
- socket *net.UnixAddr
-
- errSocketVariableIsNotPresent = errors.New("\"NOTIFY_SOCKET\" environment variable is not present")
- errSocketIsNotInitialized = errors.New("socket is not initialized")
-)
-
-// InitSocket initializes socket with provided name of
-// environment variable.
-func InitSocket() error {
- notifySocket := os.Getenv("NOTIFY_SOCKET")
- if notifySocket == "" {
- return errSocketVariableIsNotPresent
- }
- socket = &net.UnixAddr{
- Name: notifySocket,
- Net: "unixgram",
- }
- return nil
-}
-
-// FlagAndStatus sends systemd a combination of a
-// well-known status and STATUS=%s{status}, separated by newline.
-func FlagAndStatus(status string) error {
- if status == ReloadingEnabled {
- // From https://www.man7.org/linux/man-pages/man5/systemd.service.5.html
- //
- // When initiating the reload process the service is
- // expected to reply with a notification message via
- // sd_notify(3) that contains the "RELOADING=1" field in
- // combination with "MONOTONIC_USEC=" set to the current
- // monotonic time (i.e. CLOCK_MONOTONIC in
- // clock_gettime(2)) in μs, formatted as decimal string.
- // Once reloading is complete another notification message
- // must be sent, containing "READY=1".
- //
- // For MONOTONIC_USEC format refer to https://www.man7.org/linux/man-pages/man3/sd_notify.3.html
- var ts unix.Timespec
- if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
- return fmt.Errorf("clock_gettime: %w", err)
- }
- status += "\nMONOTONIC_USEC=" + strconv.FormatInt(ts.Nano()/1000, 10)
- status += "\nSTATUS=RELOADING"
- return Send(status)
- }
- status += "\nSTATUS=" + strings.TrimSuffix(status, "=1")
- return Send(status)
-}
-
-// Status sends systemd notify STATUS=%s{status}.
-func Status(status string) error {
- return Send("STATUS=" + status)
-}
-
-// ClearStatus resets the current service status previously set by Status.
-func ClearStatus() error {
- return Status("")
-}
-
-// Send state through the notify socket if any.
-// If the notify socket was not detected, it returns an error.
-func Send(state string) error {
- if socket == nil {
- return errSocketIsNotInitialized
- }
- conn, err := net.DialUnix(socket.Net, nil, socket)
- if err != nil {
- return fmt.Errorf("can't open unix socket: %v", err)
- }
- defer conn.Close()
- if _, err = conn.Write([]byte(state)); err != nil {
- return fmt.Errorf("can't write into the unix socket: %v", err)
- }
- return nil
-}
diff --git a/pkg/util/state/storage.go b/pkg/util/state/storage.go
index ee957f270..0485b1481 100644
--- a/pkg/util/state/storage.go
+++ b/pkg/util/state/storage.go
@@ -19,7 +19,7 @@ var stateBucket = []byte("state")
// NewPersistentStorage creates a new instance of a storage with 0600 rights.
func NewPersistentStorage(path string) (*PersistentStorage, error) {
- db, err := bbolt.Open(path, 0o600, nil)
+ db, err := bbolt.Open(path, 0600, nil)
if err != nil {
return nil, fmt.Errorf("can't open bbolt at %s: %w", path, err)
}
diff --git a/pkg/util/sync/key_locker_test.go b/pkg/util/sync/key_locker_test.go
index 259064ecf..3b3e6a694 100644
--- a/pkg/util/sync/key_locker_test.go
+++ b/pkg/util/sync/key_locker_test.go
@@ -13,7 +13,7 @@ func TestKeyLocker(t *testing.T) {
taken := false
eg, _ := errgroup.WithContext(context.Background())
keyLocker := NewKeyLocker[int]()
- for range 100 {
+ for i := 0; i < 100; i++ {
eg.Go(func() error {
keyLocker.Lock(0)
defer keyLocker.Unlock(0)
diff --git a/pkg/util/test/keys.go b/pkg/util/test/keys.go
new file mode 100644
index 000000000..d233a633a
--- /dev/null
+++ b/pkg/util/test/keys.go
@@ -0,0 +1,137 @@
+package test
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/hex"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// Keys is a list of test private keys in hex format.
+var Keys = []string{
+ "307702010104203ee1fd84dd7199925f8d32f897aaa7f2d6484aa3738e5e0abd03f8240d7c6d8ca00a06082a8648ce3d030107a1440342000475099c302b77664a2508bec1cae47903857b762c62713f190e8d99912ef76737f36191e4c0ea50e47b0e0edbae24fd6529df84f9bd63f87219df3a086efe9195",
+ "3077020101042035f2b425109b17b1d8f3b5c50daea1091e27d2452bce1126080bd4b98de9bb67a00a06082a8648ce3d030107a144034200045188d33a3113ac77fea0c17137e434d704283c234400b9b70bcdf4829094374abb5818767e460a94f36046ffcef44576fa59ef0e5f31fb86351c06c3d84e156c",
+ "30770201010420f20cd67ed4ea58307945f5e89a5e016b463fbcad610ee9a7b5e0094a780c63afa00a06082a8648ce3d030107a14403420004c4c574d1bbe7efb2feaeed99e6c03924d6d3c9ad76530437d75c07bff3ddcc0f3f7ef209b4c5156b7395dfa4479dd6aca00d8b0419c2d0ff34de73fad4515694",
+ "30770201010420335cd4300acc9594cc9a0b8c5b3b3148b29061d019daac1b97d0fbc884f0281ea00a06082a8648ce3d030107a14403420004563eece0b9035e679d28e2d548072773c43ce44a53cb7f30d3597052210dbb70674d8eefac71ca17b3dc6499c9167e833b2c079b2abfe87a5564c2014c6132ca",
+ "30770201010420063a502c7127688e152ce705f626ca75bf0b62c5106018460f1b2a0d86567546a00a06082a8648ce3d030107a14403420004f8152966ad33b3c2622bdd032f5989fbd63a9a3af34e12eefee912c37defc8801ef16cc2c16120b3359b7426a7609af8f4185a05dcd42e115ae0df0758bc4b4c",
+ "30770201010420714c3ae55534a1d065ea1213f40a3b276ec50c75eb37ee5934780e1a48027fa2a00a06082a8648ce3d030107a1440342000452d9fd2376f6b3bcb4706cad54ec031d95a1a70414129286c247cd2bc521f73fa8874a6a6466b9d111631645d891e3692688d19c052c244e592a742173ea8984",
+ "30770201010420324b97d5f2c68e402b6723c600c3a7350559cc90018f9bfce0deed3d57890916a00a06082a8648ce3d030107a1440342000451ec65b2496b1d8ece3efe68a8b57ce7bc75b4171f07fa5b26c63a27fb4f92169c1b15150a8bace13f322b554127eca12155130c0b729872935fd714df05df5e",
+ "3077020101042086ebcc716545e69a52a7f9a41404583e17984a20d96fafe9a98de0ac420a2f88a00a06082a8648ce3d030107a144034200045f7d63e18e6b896730f45989b7a8d00c0b86c75c2b834d903bc681833592bdcc25cf189e6ddef7b22217fd442b9825f17a985e7e2020b20188486dd53be9073e",
+ "3077020101042021a5b7932133e23d4ebb7a39713defd99fc94edfc909cf24722754c9077f0d61a00a06082a8648ce3d030107a14403420004d351a4c87ec3b33e62610cb3fd197962c0081bbe1b1b888bc41844f4c6df9cd3fd4637a6f35aa3d4531fecc156b1707504f37f9ef154beebc622afc29ab3f896",
+ "3077020101042081ef410f78e459fa110908048fc8923fe1e84d7ce75f78f32b8c114c572bfb87a00a06082a8648ce3d030107a144034200046e3859e6ab43c0f45b7891761f0da86a7b62f931f3d963efd3103924920a73b32ce5bc8f14d8fb31e63ccd336b0016eeb951323c915339ca6c4c1ebc01bbeb2b",
+ "307702010104209dd827fa67faf3912e981b8dbccafb6ded908957ba67cf4c5f37c07d33abb6c5a00a06082a8648ce3d030107a14403420004e5cb5ae6a1bd3861a6b233c9e13fa0183319f601d0f4e99b27461e28f473e822de395d15c1e14d29a6bd4b597547e8c5d09a7dd3a722a739bb76936c1ad43c0e",
+ "3077020101042005a03e332e1aff5273c52c38ec6c5a1593170ddf8d13989a8a160d894566fc6ba00a06082a8648ce3d030107a144034200045a11611542f07f2d5666de502994ef61f069674513811df42290254c26f71134100fed43ea8ecd9833be9abb42d95be8661f790c15b41ca20db5b4df4f664fb4",
+ "307702010104206e833f66daf44696cafc63297ff88e16ba13feefa5b6ab3b92a771ff593e96d0a00a06082a8648ce3d030107a14403420004434e0e3ec85c1edaf614f91b7e3203ab4d8e7e1c8a2042223f882fc04da7b1f77f8f2ee3b290ecfa6470a1c416a22b368d05578beb25ec31bcf60aff2e3ffcd4",
+ "30770201010420937c4796b9fc62fde4521c18289f0e610cf9b5ebf976be8d292bc8306cee2011a00a06082a8648ce3d030107a14403420004ba5951adddf8eb9bc5dac2c03a33584d321f902353c0aadccd3158256b294f5aa9cd5215201d74de2906630d8cefb4f298ff89caa29b5c90f9d15294f8d785bc",
+ "307702010104204b002204533f9b2fb035087df7f4288e496fc84e09299765de7a6cd61e6a32bca00a06082a8648ce3d030107a1440342000441abcf37a4d0962156c549de8497120b87e5e370a967188ab1d2d7abce53711dfd692a37f30018e2d14030185b16a8e0b9ca61dca82bfe6d8fc55c836355b770",
+ "3077020101042093ffa35f1977b170a0343986537de367f59ea5a8bd4a8fdd01c5d9700a7282dba00a06082a8648ce3d030107a144034200040e01090b297cf536740b5c0abb15afba03139b0d4b647fdd0c01d457936499c19283cf7b1aee2899923e879c97ddeffe4a1fa2bffc59d331b55982972524b45b",
+ "307702010104201c1a2209a2b6f445fb63b9c6469d3edc01c99bab10957f0cbe5fad2b1c548975a00a06082a8648ce3d030107a144034200040c8fd2da7bad95b6b3782c0a742476ffcb35e5bc539ea19bbccb5ed05265da3ab51ec39afd01fbee800e05ec0eb94b68854cd9c3de6ab028d011c53085ffc1b3",
+ "30770201010420b524d8cba99619f1f9559e2fe38b2c6d84a484d38574a92e56977f79eac8b537a00a06082a8648ce3d030107a14403420004a6d7d0db0cc0a46860fb912a7ace42c801d8d693e2678f07c3f5b9ea3cb0311169cbd96b0b9fc78f81e73d2d432b2c224d8d84380125ecc126481ee322335740",
+ "307702010104207681725fec424a0c75985acfb7be7baed18b43ec7a18c0b47aa757849444557ca00a06082a8648ce3d030107a14403420004bd4453efc74d7dedf442b6fc249848c461a0c636bb6a85c86a194add1f8a5fac9bf0c04ece3f233c5aba2dee0d8a2a11b6a297edae60c0bc0536454ce0b5f9dd",
+ "30770201010420ae43929b14666baa934684c20a03358cda860b89208824fac56b48f80920edc4a00a06082a8648ce3d030107a14403420004d706b0d86743d6052375aa5aa1a3613c87dccfe704dc85b4ed4f49a84a248a94582202927ec0c082234919f3ce6617152ba0d02497b81c61284261ce86cef905",
+ "3077020101042089d600f43c47ab98e00225e9b2d4a6c7ab771490f856d4679d9e1e0cca3009d0a00a06082a8648ce3d030107a144034200048515055045543e429173fc8f9f56a070bd4314b2b3005437d8504e6b6885f85101409b933e27c0de11415aee516d0d1b474088a437ece496ceb4f1c131e9ea40",
+ "3077020101042015518dcf888c7b241dac1c8bfa19d99f7fdba7ba37ed57d69bbbd95bb376ea4ca00a06082a8648ce3d030107a1440342000459e88d92efaa5277d60948feaa0bcd14388da00e35f9bae8282985441788f8beb2b84b71b1ae8aa24d64bb83759b80e3f05c07a791ffe10079c0e1694d74618c",
+ "307702010104203e840868a96e59ca10f048202cce02e51655a932ff0ac98a7b5589a8df17f580a00a06082a8648ce3d030107a14403420004f296414e914dcefd29bc8a493f8aedc683e5514a8ec5160637bee40ebaa85a421a363c8f7ce3ed113e97d2c4b6d9cd31d21698a54fce8d8e280a6be9ee4fbca9",
+ "30770201010420aa746067891cf005286d56d53092f77961f828bf5bf11aade18c8a458090d39aa00a06082a8648ce3d030107a144034200044af5ad2dacbb32ab795ab734d26bae6c098bd2ba9ca607542174d61b49ca3c07786aeb0c96908793a63d4f20cd370a77b7ec65e6b285c6337764e7ae3cd5fa1c",
+ "307702010104207135cbd831d52e778622c21ed035df9e3c6e4128de38fbf4d165a0583b5b4a29a00a06082a8648ce3d030107a1440342000412e2b9e11f288d8db60fbb00456f5969e2816a214a295d8e4d38fbacab6b0a7e0cdb8557e53d408244083f192d8a604d5b764ab44b467e34664ca82e012b60ab",
+ "3077020101042064b839ca26c42e2e97e94da5589db2de18597a12d6167fdfe0d20e932de747a2a00a06082a8648ce3d030107a1440342000481e90c2173b720447ae28361149598a7245ed51c3881a89353da25b8e574b8c9b2d80b2563efe5d9a0184b57af2431116c8a4ad8071ef2764ca3d3744c638401",
+ "30770201010420a56df8e6349520d27c36eb1e9675720c702d562842c859cd54b3d866f2cada30a00a06082a8648ce3d030107a14403420004dc08beb5b857f6da13ae1116e40a6e4e4b5aaebc8040eae0b3037c243b1c24def39de670380472df7aa98cb9e0f1132bc4afc0629d80a24c54b8ad600cb24cd4",
+ "30770201010420bd2dd18485a9667673b2c38c2ad51cc756a199d18fe1100acf29b647a549171ea00a06082a8648ce3d030107a1440342000422825ffe8b3416b6755a7076a7dc6f746ff29ee0a4455dceb0f3262127d51c9bb53f2c204636da8d7a09961274d7c7ba2ef3c771e83fb996ffe3f9882c530ffd",
+ "307702010104203058a0c8de5c6d4a5c7f64883e7d3c9f5097c8bc073cc482421e903b37123c06a00a06082a8648ce3d030107a14403420004f959705673c2f4112673e43d1d876ca71c64153abb6c9f58d1c3b3c1f8c213ee346833fb695eb533664d596a68e42150a21b405e3a08ed70af5f568275a7a79f",
+ "307702010104202bd9035bf38e7c4580abc377a6e9c31aa9bdaff90af2ce688eda9a532c83875ea00a06082a8648ce3d030107a14403420004918010ea3387786c6a257996ec74d7ee4e1703b3b811118f4e89fabfef7c694495191848a0d590313a0be9784644ef98e0f0f7e50fed5bee3fa48d66edbcd2b5",
+ "30770201010420aa055d6cbe96e1cfbe39530bc4b7a976baff53ce399956f0d8241750d3379990a00a06082a8648ce3d030107a1440342000444e8b6deda76c12320a8c5b7a48141ebf5dc9288df79a0f418ab92d82061d10118b8bce9fb200e5009a19fb0e19036762b3ef85440405f43225d6ee3350bf96c",
+ "30770201010420b8712525a79c7bd3df2a9dbabde1a111078a7ef30687a2efe0f0c4b4a23f2aa0a00a06082a8648ce3d030107a144034200049dc9e3d836a834f6d14ae99dfc70ad9b65c84f351c8dbc4f9b1b61c238051fb1db23e43d4b6e17803e21ebc44fe2f66742e306daa8c4ca7d79c6dd01fc1a4e4e",
+ "3077020101042086c18b56c4a2264b37c18a7937f026ab07ca6076eeea1ab90376492efb7875d9a00a06082a8648ce3d030107a144034200042f169311f2fae406de3c4a64fec94a22c35972281922a69e7657185997ae59fb3f69ac94295e58681cfbd263f8e6fbce144cc7925b71d90f57de3f3e10588321",
+ "30770201010420f58221355e1b2da73d66de482ec1edcb8597f3967d00d1356f4678fea6ad67e6a00a06082a8648ce3d030107a14403420004238cc44f02fa566e249a9697a078b9d38eba06012d54a29a430843a18df7a0a4207d704a360399db95eca591f2f81b6c50390467f293a1623b4757bdb4138101",
+ "30770201010420b10888a0157d524667fd575683bdcded4628a65149fde59b7340781b0cf2e36ea00a06082a8648ce3d030107a14403420004222ba11430b8719929c726aec74e8e70893e2960bc2bbee70fbaa6d88fa2a346adf0c450ea9823f0ba77d334fcd476ea036a62199338d7aa32e56c708d7a8caa",
+ "30770201010420edf001bd24c92e4f65789aae228223e77df71ce9bbfd7ce4d236ea3648e1f7fea00a06082a8648ce3d030107a1440342000472693c95786ab9f4e7c923338ce98bd068e28b71f84b77e7adb378c2ce2d8f1a2e13833df1afe4569367d7a4eee3abf50124299a28045a0073ea324f5ddb45ea",
+ "30770201010420e2649e591fc9072dd55573e41fc4ebfdf1db118951e4b7b2a98027ac9a4f7702a00a06082a8648ce3d030107a144034200046e34c9dea1836671f1ef259d7c3ee678c2f92d092af2518413fe9ba153a07ca8e9938784876e90cfa2989a00a83b1ac599c87a8d15be8001e46dfbfe018156a2",
+ "3077020101042069cd9b710f25613794751aed951004c888d4611aefa45abc23abff218e608290a00a06082a8648ce3d030107a14403420004dcf8ff34ab841720ff8dc08b60a14f41689e65f979a1af69b5e106f4262a2cb0947c9619e980caf20b3e7c8f15e60fc31c5b611c8a58370ba8201c9b6b932bd4",
+ "307702010104202898cef1944aaf90fddf433390323a02a79938568cf99f6c25bc9aa9e5cddb0aa00a06082a8648ce3d030107a1440342000491a1c20420f5005f5761419e4dcd0d9da0cf2ea4733f6d98a3d0c124f284cabdc65eafd9d2cad9b1122fca791c8b37997feed130c5725ea797cf07c61fb82734",
+ "30770201010420e568bd3ffa639aa418e7d5bc9e83f3f56690ebf645015ff7f0e216d76045efd5a00a06082a8648ce3d030107a144034200042424b498297124037db950bf2a1e652ba7f977363f4f69d7308531d27bf392219d93cb78f4379b7ffb16f3e7be311e208af2409bd33000fd25a8707ac6bec76b",
+ "307702010104205163d5d5eea4db97fccc692871f257842fdaca0eca967d29924242f7a2c56ad7a00a06082a8648ce3d030107a144034200044e2ca8312122039c3374db08851710d3b9a2efcbd8f5df004ec7b60a348aee32466f799b5957d39845f451071bb1f3bb99f25bf43196e7c772f7b84f39221b3a",
+ "30770201010420301eb936d2737886ab2fbf670952f9ba0d324827b81801810bfd60c89e8ca862a00a06082a8648ce3d030107a14403420004455454b1f3828a2328a8925c4c98bd6e37dece276efb3299d8b7d78c9d7e6f978b14d021c07bae0c18a623fc52ab2fec1523a89b2fd0cda373e9c9442a3545f2",
+ "3077020101042032c12a9bca8070c131b0a46944c17adf35eb44079f3c887fc3b93740bb9c03fca00a06082a8648ce3d030107a14403420004e61da413c4d5dbc6c004089d96a3cb55f4b20b70c544f3823a7a6322c53e134fcb8a885729ef284d68d23e0a58009d48b369f9c4f5a665a8880a48606491dd8a",
+ "30770201010420aa2b40742722b81c6ffd5c47b94b8be747da259e172a82d27ebc525c8f46d17aa00a06082a8648ce3d030107a14403420004f87a863ed11592cf4f96e837038b105d155f5e09a31386ab4604234e8a975d49a9612b4597b7fb206087b70a26bce4aca31edb253530e6da83ce16beefa99f60",
+ "307702010104202a70a0c827b4ce8d433e800ab0818b1401b220fadea75feff655251ee4317556a00a06082a8648ce3d030107a14403420004a5c9209fd53dc1ce2c873782ec507db5e0f9cc78292a84ecafc5bab16c2e4d786a882ad77ad999f3d6ba676ad80354ad376dabc4fa03a6c15ead3aa16f213bc5",
+ "307702010104202787d04901f48c81774171ef2e2a4d440b81f7fa1f12ab93d8e79ffab3416a1ca00a06082a8648ce3d030107a14403420004010d32df4d50343609932a923f11422e3bea5fa1319fb8ce0cc800f66aa38b3f7fda1bc17c824278734baa3d9b7f52262eeacbca21304b74ba4795b5055b1e9f",
+ "3077020101042032423728a897144d4fb95090ca0ac67a23eb22e2f7f925cbddaf542eeaec8faaa00a06082a8648ce3d030107a14403420004c37f9fec5b1be5b0286300ace6a5d25df8189d29604145a77b6578a4e3956ed3d9af48f8ee1e39868bba9e359e5444984f0428755e29d2012f235c9a56749148",
+ "30770201010420d5bd2a3867937e0b903d19113e859ca9f6497f4af082894a6911cef3a3a12d35a00a06082a8648ce3d030107a14403420004435b2e891c46023f422119f18a04c75b9322ea4aaddd10a0568438310896388bf7037e98bd5979a6f0839acb07dead1f2f973640dcc11dcee1de8a07c0b3dd80",
+ "30770201010420590edcf1f2b6ee6c1b836ace33b934597883a00ce84fe812a4b3e22432846972a00a06082a8648ce3d030107a14403420004183d7cad633cb0f4ab774f4dc19b9db87e7ef97b0f4d43ac395d2409dabbe5339dbad661c7c2fd05606e2edb08f8ace660f73bf5232011262d563603f61d2353",
+ "30770201010420a0ea4e16cf8c7c641d70aea82192fb9303aab6e7b5cd72586ba287d50f4612d6a00a06082a8648ce3d030107a1440342000482a72d31e71f0aea778cb42b324abf853cb4e4e8d4b2ae0e5130480073e911f183134c047a7e1cd41a845a38057ea51a1527923518cbf47c3e195a9f44e1d242",
+ "307702010104209e04b00c8d0f96ddb2fbb48cfc199905bfbfcc894acb77b56bf16a945a7c7d08a00a06082a8648ce3d030107a1440342000405efd203dcddfb66d514be0de2b35050b83e3738096cd35398165bfdbe34d34c0d96a4e6df503903c75c2c06b66b02b15cd7bf74c147d7a9f0a5e53b83c5762d",
+ "30770201010420aa69f1cc2cb3482a12af4b1614d6dde01216f1cad1c9f03c681daa8648b75b37a00a06082a8648ce3d030107a1440342000474ffec1297420d0cf730b42942058699d803ab618e1e40ccf9cc17f71f62b3123d863fbf8fae37b6c958892af6151159f74e2a568917bfc2f4e00c55c32b52e7",
+ "3077020101042090a04300e8d6ed9f44422a2cf93817604bf1f6233c4333ba0db20ab726852fa4a00a06082a8648ce3d030107a144034200049e6f2001baf2b6fb25e3273907ed7320f494de6b5882c4c4b9bcee7ddc60274e064cc68c64325c001f07a505722062d1ca9774a2cc1e0cd28fe5f807865bfcc1",
+ "3077020101042088945c19c6ce3e63f8d8a421616391d83bec79a0c590f1607b247ffa0c677dd3a00a06082a8648ce3d030107a1440342000492d17d410f9eabf7ae4509a92494e9fe94a72947f24e60c5bb6e12b2cde3c1bfe5305a0d759138069d44268f174136971ecb752df602c282e48d40f43a8734e3",
+ "3077020101042079d14eacdc4f21dc5284bd8487dcb2c22e9e53e71909474f922bf695f49cf23ea00a06082a8648ce3d030107a1440342000428039292c5bcf3593639bf5835ec9411ffd3ac236c0186697623930b5ca63f32ff41df5217e7def770d9a0de87f61526497bd9aaa95d924e0a17d85958e7c095",
+ "30770201010420a6ac867ff8d00aaad23198415868a64e59217b4d22474752a146fcb52204dfa5a00a06082a8648ce3d030107a14403420004a5f37a779265c55cd4f5a7f3bffc4679395898046eb9d67d8670be39001de5a7bc010b0d218561626272989c5952e8e0d95d2590f78eec44dc62a46184956301",
+ "30770201010420df446014577f6081113cd7d33c6ba91b9ac3d083e76f8873358f83129e2d0111a00a06082a8648ce3d030107a14403420004da0c932759f50ad705507f876138c2c6e012764abc8764a6dd609e6ad06099952b120be71690bc091591f1aa8d7d6e9365deddbc958bc87ff150358ad33f7537",
+ "30770201010420b3351033eaaee3a9ea27cd7dc54aa2c8d787b14b7d428165f1a04a59c6d5b0f2a00a06082a8648ce3d030107a14403420004da3984fb8152403a9fb9068b16f9afb5c900f24230e205567b4405ee3cad2db3ff46968489d494b38d0c85fcc4aecccb61fc00dca54c8fd99ee5bf5e2616f1b7",
+ "30770201010420deedbcef7f6821f6aab2b15ce198f5eb2064f6eb461a6b7776b4da35c81b1506a00a06082a8648ce3d030107a1440342000405422b86ce66b18e68f0fb14f28e4ed9b1f7ee84f57957f4e4b4c6b0c392e6357e4698fb707f590be1b915622ec8da476071a56919211f6e5e888284d4e33f06",
+ "3077020101042078c3db0d3b1114cb99f1d0bea0d3aec9067b26964e2b85fe9df4789b24cb3da5a00a06082a8648ce3d030107a144034200046874e52d7d58b6697b407b0c0eea3cfeb528e34fca1589c5031e11aae1ad1f9280e7a4c37ddf28479cd07b4246ce9398e0e24f99946f87e08532fa26b8fb8016",
+ "30770201010420f0ba42553b146cf088d3a5a3645782fe675d23561897ced7f1270a8d05cfdaaaa00a06082a8648ce3d030107a14403420004c250e12f3aa1fb6261c57cdb091cd90d82917e103711425888477b9da4359d2803aaf0015638294c7c0baa4ec77ba8fceff5ee7f15ea087a4174f58d518006dd",
+ "307702010104207f2c0fc4b0e418b2d4c72a63fdc27f158f6ad44c26d161f489714525b6a13db1a00a06082a8648ce3d030107a144034200041d83885672021e783d8bd995d187f407bbda2c6bed5e8fabc7c6c5cb304a85eaffa12dad7ba874ac45f4258fffe07534843ff7fe76075470f2c77104d781688f",
+ "30770201010420d3de828ac9742704d4e6981ce1fc8c473e508eda3a121cda420dacbdf39d48e9a00a06082a8648ce3d030107a14403420004c78abfc4a5c0eb3ee0c9817d1790b7ca9fd528d0bc727f9daf63f4212097538b6888b9de2ae4dff29895500be456fe0ccbee340aecb546d1558b08c3718aaa4a",
+ "30770201010420d9c4e477b56f2ff0b211acd82b450336276534b350747315152a4923e6e65294a00a06082a8648ce3d030107a14403420004fbd540966b03fe2c2314f20248d345e3e9b92d6a7cfea22d1b5367f01b32d616f317e00cea1f659437b4302610abba8abb0f2bfce0a91b952e9565159c1e464e",
+ "30770201010420fb84f4a426fa12920c2cf7c2d821280530c0fa93960ded8c20120511dc1d5069a00a06082a8648ce3d030107a14403420004c0177f13c6e00bb9029df089006a332192bdf12a782c60a8d00d110c53db67c344584f22677695a7f1629db1600b0559ced49ac931b08cc6a58e5ea436bde2f8",
+ "30770201010420653ce060214028f7aa584910f0925d702bde18d52d8e530f07dd5004076eb614a00a06082a8648ce3d030107a1440342000433668d0c9085feae4b285fe260a316e24f24c0bb8e442583e23284bf5a962cd0357cd63ac4d1cdda58afb201bceee911ebe7cf134652dc4390f4e328f6cb5d65",
+ "307702010104206123b7d5b8c53b2a2a95dd2e42fe550617b7520fe9bd94a99045addb828ad847a00a06082a8648ce3d030107a1440342000487c10fdeaabf8072dcea0dc5b18be4d72f2b8298bc891ea0a11d202438b7598ac588f16a9cd697f8220434d4e15ff4c82daaae63955525633335843069434aea",
+ "3077020101042000b793c9b8553ee7bec21cd966f5aaff59a07d1fa3fa86e0164bcd2f7f4dd586a00a06082a8648ce3d030107a1440342000419d4179dbeae7fa87e356f0406c327239d34e540cd7db5174a81bd6197738bc72e46fe4bd1512dc4b35950b2c1e78e6f8f54980193be78d45e4d97a837455777",
+ "307702010104200fb1a771004f6be6300eccd603b9c9e269fbdd69e5eb183d7acad51b0b205b88a00a06082a8648ce3d030107a14403420004d3b7fa62bacff49714ef28a955cdc30f4aef323293ac3aebab824892dfa3306f2ec319f5bca1771b956b4a9b1c2f565dc08b29c07ec84623932a5d6fb59be6c7",
+ "30770201010420fe6907b91407619fdc95153cd59df061e88095678801008d3901f29c7c434243a00a06082a8648ce3d030107a14403420004796fcea7889128f8060b04e9000381fd3d80fe68f000063b182fe9d8984e740c387c4ed4c6729e8c715c576fe355a9b7dda6890c55b15ae6013fd51e8858b2f2",
+ "30770201010420111eaff6db3b279d014b45b3da091909f054f37c350c237fe9d51b4342811299a00a06082a8648ce3d030107a144034200047d51f9178725c4134579ac6d0cb84745e0d2068ccf72d30c02dd431547f868d1cb93b5774c7e1eb9582e2151521ff16cdf80b3ba4646d64f7982066f9eb679f0",
+ "30770201010420631d01e6aaa68e6c36e3425b984df02bc5b54e81951479f7cea8fd1b804bab57a00a06082a8648ce3d030107a14403420004fa1b1ed9ff904f1f050577e05b5175e897d462598fdd323c8ef25f6072dfa43034baa0119e64092fb44f7a04d59d16ba8645f52cfb7775a6536c00f7fc2ee2f1",
+ "307702010104201ec553d14d45acdf147dba5fcbc3a42a1f763411d5c206d03600ed810b0cf106a00a06082a8648ce3d030107a14403420004e9a309a24d1061204087de10e5bc64b6d45369399a5a402d630ca2d04b34ae9d27d491e5fadd5d082e14454e6b2a572a24904ba2a8dc7430b20d361134188589",
+ "307702010104206d31e401bb20968106a058f8df70cd5fb8e9aaca0b01a176649712aa594ff600a00a06082a8648ce3d030107a144034200048555a2f9e7256c57b406c729d2d8da12c009f219e81cecb522cb3c494dcc1c76ac6d2f641dafe816065482fb88916e1a719672c82406556e16c32cf90752a92f",
+ "307702010104208ada3d6ea6000cecbfcc3eafc5d1b0674fabece2b4ed8e9192200021b8861da0a00a06082a8648ce3d030107a14403420004a99e7ed75a2e28e30d8bad1a779f2a48bded02db32b22715c804d8eeadfbf453d063f099874cb170a10d613f6b6b3be0dbdb44c79fc34f81f68aeff570193e78",
+ "30770201010420d066dfb8f6ba957e19656d5b2362df0fb27075836ec7141ce344f76aa364c3cea00a06082a8648ce3d030107a14403420004597fd2183c21f6d04fa686e813cf7f838594e2e9c95b86ce34b8871674d78cc685b0918fd623e3019d8c7b67104395b1f94fc3338d0772e306572236bab59c39",
+ "307702010104202c291b04d43060f4c2fd896b7a9b6b4f847fb590f6774b78a0dff2513b32f55ca00a06082a8648ce3d030107a14403420004e80bd7e6445ee6947616e235f59bbecbaa0a49737be3b969363ee8d3cfccbbc42a0a1282de0f27c135c34afad7e5c563c674e3d18f8abcad4a73c8c79dad3efa",
+ "3077020101042029af306b5c8e677768355076ba86113411023024189e687d8b9c4dee12f156fda00a06082a8648ce3d030107a144034200049d7d21e6e1e586b5868853a3751618de597241215fb2328331d2f273299a11295fe6ccd5d990bf33cf0cdcda9944bf34094d5ffa4e5512ee4a55c9f5a8c25294",
+ "3077020101042022e65c9fc484173b9c931261d54d2cf34b70deccb19ce0a84ce3b08bc2e0648ba00a06082a8648ce3d030107a14403420004ea9ee4ab7475ebaff6ea2a290fc77aafa4b893447d1a033f40400b4d62ee923a31d06fe5f28dbc2ebec467ebd2e002a9ea72057f0b0c60fe564584a6539376ad",
+ "307702010104205000583dc21cb6fd26df1c7d6e4efb9b47ceff73c0d94ed453bae0c13a9e5795a00a06082a8648ce3d030107a144034200045a6a5b5886b01f54dfa0788f15d3542aec160843a57e723008d1b984dd572ecb8935662daaba53d756d45442efbae067f52b0b151899a645afb663205babddd3",
+ "30770201010420997431e73eae00f476bb1a221b4cc9dfd18d787be207b7069141627f61ba752da00a06082a8648ce3d030107a144034200047c89dc8c46a27e20c37b0ecf1150e8b92c2dd4dc534a25545f87a5f0c44fdbf4dee2af5bcdc4012f0acee168aeb55bb4d24738fac105fc056928ff5870491047",
+ "307702010104207dc10db95a597a80e916d7f8e4e419b609d767538fe9732bcc5f9d783c605a2ba00a06082a8648ce3d030107a144034200042e2ae4fae087a11fcdf9565670164c229337ed87b5056687c6bceeb84108db9a88b9e5d96a0cf121255ceefce0bb5239608768bb841e6687dbd9626222eb5187",
+ "307702010104209056e22b347f5f1839f1a53f1250d098616ff04db0b49b1fddb18b987930cec7a00a06082a8648ce3d030107a1440342000427cc4c7fb5d7ac047161aee78e812ad264ba25dd878684637308674ea693817b20a5e3672de6a92dfbf82f641268052fa742e6f35ff91c617334f09f89bd1218",
+ "30770201010420554ea6cfeb2cc4f1e29c08e65317d72731ee03940af9ff6a141b761d5d054db6a00a06082a8648ce3d030107a14403420004a6121746c0553ede0944da8a7f304831fcefb51b40acf78016d41cc45cc5f7e9a1b22bbea028daab5cb4c39cadf84da442749cbfc04536d6f85c3254ec7a0805",
+ "30770201010420f53ff1c7db3c4e7c734bf7396a1a5364ac2dfe4b794b118aada6bab72cde8969a00a06082a8648ce3d030107a1440342000414b11ec158e3f9d558bd1da1ed0e38c92b1ad55834f3ce08e456747279dd9ed1143cff4f5e8d70189f4b114e3cd609105d6eb8f431f392487e4c9e16a152dba1",
+ "30770201010420b3f394090547f5dcb2e77cef65e03a3b7d1c953cd0e069553da2795ab0adc950a00a06082a8648ce3d030107a14403420004a1a9dbe5d6dfa2dfb039aebabe96b12faf97c994e1430323d074ecbd90ef075e0fe9dc7d5eef2483d485ffb0b4a01b01e131754fb38059a1365d342d5175397a",
+ "30770201010420bf13c42fa84c409161f9d73ce20fd85b20c5381914aa2a2375452b34cd352022a00a06082a8648ce3d030107a14403420004e0134214a5349a235cee406ad942ca105ef871a7e4c922ef4769466d8495c78b82f6c49270c8cd913e0cf407cdab679dd9914090ea91122ca9fb654ebcfce57d",
+ "30770201010420440d975b65bf585d0813137fe041461de59221856eaf255479b5e69721cfb30da00a06082a8648ce3d030107a14403420004935a9626ddb7bd6fbcd2ad9d9333851bbc64b9997cb8e43b1a17f8e9968ed6b0e5d2edf105fbabc9bd745fa2120ac527bbfefb6e8ed96844f80b8e27b6d9a549",
+ "307702010104209ea2dc59260408165d6c42205aa52e275f81c39d9bf5b1b9c8187ade875e8068a00a06082a8648ce3d030107a14403420004bc570aa24df0306cb761ee9fb22e61f59ae4f11e8804491d8651084f191c800d1e6b16e4bc3693b88f9bef82849f3cd6914a15cae60322c1f4822a2bdf426782",
+ "30770201010420505b596fb71a2e36c0ba07da03442a721f3f1832dcac19631d6c11b36ab81986a00a06082a8648ce3d030107a1440342000472cfb26cf07faa4e6e9d328214677b5eb51cd2e35717ac661d732115e592a07482bf966a31792cc993bdf816a732069ed423871b53fb3c7eabab2f4d3d272013",
+ "3077020101042089a9d5b397c521db4bb4a5f3e8f2043e43bb5617a2070e7bfa30dd2dbf1815a1a00a06082a8648ce3d030107a1440342000468d2aeaf641b839095644cfd4b72ab97d0bf3fae1ed36e9f81d9aff333b0123f7b846f6ca61dbbd4e10988e740463addef793994a1498987883ecf237f18bc40",
+ "307702010104200919a89aedb4e20cfcd2cb568c8de18b1b60b5da17aaea3be9804eb5bc3280f5a00a06082a8648ce3d030107a14403420004139812ec6bd62fd3ce71040d87cc07671948ff82300fae5f3af80dcd4e22c870c0102c4add460b2cbbeeb298f58037fc645da20aa8f5531a5ff56d3e5b2d1944",
+ "30770201010420b145fc69cfabff378f390f0a99fb98ddc8ba9228cb1adf9c7099c6393a24567aa00a06082a8648ce3d030107a14403420004b660084cb05e005fb163011663fee6946f354714565069968f16e89e9a7aac45610f05502ff9d9e3cd0fdc88083bd8840a518b71135e59a0f0f235636d5eb7c4",
+ "3077020101042082d39168f289e784ace49bfdd523297b524c494f83fe7d04dd2f055b48d636b9a00a06082a8648ce3d030107a14403420004ea4021da5eec4e7f333059625ecbad3969676cf625cbf0da316f55f50ccd40e6174fdb7023c07abdb3ca91203acbcb5e78e1601f1a9aa616c5019ac5b2222ff4",
+ "3077020101042066a1ebc23e993674bfdc3b9721c280b7f3c1599903063ea7899b848b942a6169a00a06082a8648ce3d030107a144034200046bdb182c6c0c1f9ea898c3847bc4b46014cb8da6a02d75b7bed3c4a9a4e9c8836d4ce22fe68b68ae56a91fb435c7ea8f05bca8e8fcb1d6b77770d419f99e51da",
+ "30770201010420fa2cda21b761c46fcc5b54d47b045e24affdb95425e859bb367a07950119ab6ba00a06082a8648ce3d030107a144034200044b9e4cee102ad23fea3357f8f5f95ab9d60d34086ba4b39d5f37cbc61998ac9658ec56033ad72977d41e449d449f5aac2bc653ea8038fc04a011ff02ec49e088",
+ "3077020101042028acfb3c41b7be1d9d0506ac3702c363ffd767dd738dc8ab581ad7add2ec8872a00a06082a8648ce3d030107a144034200047467dedfb8c9a7d9496d4898d6ace0fba063545ab0d345d8b63b90871927ed269645a745a7335ca511d86a366f24e7832477842b4041a9ab564c5fbce49e4df8",
+ "307702010104202e57b8b867bd95a8dfcdd2cb8f82ea41bff21610019afd6e2367e755dec5b944a00a06082a8648ce3d030107a144034200048f97eb2d6ee2d3da8746d8d4f84469ea765fb0d1412b167b6d8a916b5f968b4d64ede5ea6d6e08ec0de192262fcb3ebed49e9d17858261affed84827b38c6cc9",
+ "3077020101042021a904281e4c31386ce34a5b52af3a068caa65819fbcf0ca76ab6041ecdaf454a00a06082a8648ce3d030107a1440342000405f9b7894a97fcddfc3285b8e974718606616fe07c70b7ab2bfb28a85fb3014c2610ab9e8e6da8ae3da032837d3a14b1e791d2633bdd8551b4817a080b9aa697",
+ "3077020101042089c2c73d08bd03da4c3111aa0b78bb1edc5243d8e119513035d3741e851dec1ca00a06082a8648ce3d030107a14403420004ec9ebc34f45150334fd1d8c92274fe43c5b3b059f15cb1963f6cf7d54bc6b1b0b4ef1c5d56d2d06ab54ce2e7606e0fa5d2f188a2d593b22d9cf6a0098aa00cb6",
+}
+
+// DecodeKey creates a test private key.
+func DecodeKey(t testing.TB, i int) *ecdsa.PrivateKey {
+ if i < 0 {
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err, "could not generate uniq key")
+
+ return key
+ }
+
+ if current, size := i, len(Keys); current >= size {
+ t.Fatalf("add more test keys, used %d from %d", current, size)
+ }
+
+ buf, err := hex.DecodeString(Keys[i])
+ require.NoError(t, err, "could not to decode hex string")
+
+ key, err := x509.ParseECPrivateKey(buf)
+ require.NoError(t, err, "could not to parse ec private key")
+ return key
+}
diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go
deleted file mode 100644
index 7373e538f..000000000
--- a/pkg/util/testing/netmap_source.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package testing
-
-import (
- "context"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-)
-
-var (
- errInvalidDiff = errors.New("invalid diff")
- errNetmapNotFound = errors.New("netmap not found")
-)
-
-type TestNetmapSource struct {
- Netmaps map[uint64]*netmap.NetMap
- CurrentEpoch uint64
-}
-
-func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
- if diff >= s.CurrentEpoch {
- return nil, errInvalidDiff
- }
- return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff)
-}
-
-func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) {
- if nm, found := s.Netmaps[epoch]; found {
- return nm, nil
- }
- return nil, errNetmapNotFound
-}
-
-func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) {
- return s.CurrentEpoch, nil
-}
diff --git a/scripts/export-metrics/main.go b/scripts/export-metrics/main.go
index 51705ee49..f29eca37c 100644
--- a/scripts/export-metrics/main.go
+++ b/scripts/export-metrics/main.go
@@ -6,7 +6,7 @@ import (
"fmt"
"os"
- local_metrics "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ local_metrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
)
@@ -45,7 +45,7 @@ func main() {
os.Exit(1)
}
- if err := os.WriteFile(filename, data, 0o644); err != nil {
+ if err := os.WriteFile(filename, data, 0644); err != nil {
fmt.Fprintf(os.Stderr, "Could write to file: %v\n", err)
os.Exit(1)
}
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
deleted file mode 100644
index 39a420358..000000000
--- a/scripts/populate-metabase/internal/generate.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package internal
-
-import (
- cryptorand "crypto/rand"
- "crypto/sha256"
- "fmt"
- "math/rand"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
- cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
- "git.frostfs.info/TrueCloudLab/tzhash/tz"
-)
-
-func GeneratePayloadPool(count uint, size uint) [][]byte {
- var pool [][]byte
- for range count {
- payload := make([]byte, size)
- _, _ = cryptorand.Read(payload)
-
- pool = append(pool, payload)
- }
- return pool
-}
-
-func GenerateAttributePool(count uint) []objectSDK.Attribute {
- var pool []objectSDK.Attribute
- for i := range count {
- for j := range count {
- attr := *objectSDK.NewAttribute()
- attr.SetKey(fmt.Sprintf("key%d", i))
- attr.SetValue(fmt.Sprintf("value%d", j))
- pool = append(pool, attr)
- }
- }
- return pool
-}
-
-func GenerateOwnerPool(count uint) []user.ID {
- var pool []user.ID
- for range count {
- pool = append(pool, usertest.ID())
- }
- return pool
-}
-
-type ObjectOption func(obj *objectSDK.Object)
-
-func GenerateObject(options ...ObjectOption) *objectSDK.Object {
- var ver version.Version
- ver.SetMajor(2)
- ver.SetMinor(1)
-
- payload := make([]byte, 0)
-
- var csum checksum.Checksum
- csum.SetSHA256(sha256.Sum256(payload))
-
- var csumTZ checksum.Checksum
- csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
-
- obj := objectSDK.New()
- obj.SetID(oidtest.ID())
- obj.SetOwnerID(usertest.ID())
- obj.SetContainerID(cidtest.ID())
-
- header := objecttest.Object().GetECHeader()
- header.SetParent(oidtest.ID())
- obj.SetECHeader(header)
-
- obj.SetVersion(&ver)
- obj.SetPayload(payload)
- obj.SetPayloadSize(uint64(len(payload)))
- obj.SetPayloadChecksum(csum)
- obj.SetPayloadHomomorphicHash(csumTZ)
-
- for _, option := range options {
- option(obj)
- }
-
- return obj
-}
-
-func WithContainerID(cid cid.ID) ObjectOption {
- return func(obj *objectSDK.Object) {
- obj.SetContainerID(cid)
- }
-}
-
-func WithType(typ objectSDK.Type) ObjectOption {
- return func(obj *objectSDK.Object) {
- obj.SetType(typ)
- }
-}
-
-func WithPayloadFromPool(pool [][]byte) ObjectOption {
- payload := pool[rand.Intn(len(pool))]
-
- var csum checksum.Checksum
- csum.SetSHA256(sha256.Sum256(payload))
-
- var csumTZ checksum.Checksum
- csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
-
- return func(obj *objectSDK.Object) {
- obj.SetPayload(payload)
- obj.SetPayloadSize(uint64(len(payload)))
- obj.SetPayloadChecksum(csum)
- obj.SetPayloadHomomorphicHash(csumTZ)
- }
-}
-
-func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
- return func(obj *objectSDK.Object) {
- var attrs []objectSDK.Attribute
- for range count {
- attrs = append(attrs, pool[rand.Intn(len(pool))])
- }
- obj.SetAttributes(attrs...)
- }
-}
-
-func WithOwnerIDFromPool(pool []user.ID) ObjectOption {
- return func(obj *objectSDK.Object) {
- obj.SetOwnerID(pool[rand.Intn(len(pool))])
- }
-}
diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go
deleted file mode 100644
index fafe61eaa..000000000
--- a/scripts/populate-metabase/internal/populate.go
+++ /dev/null
@@ -1,260 +0,0 @@
-package internal
-
-import (
- "context"
- "fmt"
- "math/rand"
- "sync"
-
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
- "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "golang.org/x/sync/errgroup"
-)
-
-type EpochState struct{}
-
-func (s EpochState) CurrentEpoch() uint64 {
- return 0
-}
-
-func PopulateWithObjects(
- ctx context.Context,
- db *meta.DB,
- group *errgroup.Group,
- count uint,
- factory func() *objectSDK.Object,
-) {
- digits := "0123456789"
-
- for range count {
- obj := factory()
- id := fmt.Appendf(nil, "%c/%c/%c",
- digits[rand.Int()%len(digits)],
- digits[rand.Int()%len(digits)],
- digits[rand.Int()%len(digits)])
-
- prm := meta.PutPrm{}
- prm.SetObject(obj)
- prm.SetStorageID(id)
-
- group.Go(func() error {
- if _, err := db.Put(ctx, prm); err != nil {
- return fmt.Errorf("couldn't put an object: %w", err)
- }
- return nil
- })
- }
-}
-
-func PopulateWithBigObjects(
- ctx context.Context,
- db *meta.DB,
- group *errgroup.Group,
- count uint,
- factory func() *objectSDK.Object,
-) {
- for range count {
- group.Go(func() error {
- if err := populateWithBigObject(ctx, db, factory); err != nil {
- return fmt.Errorf("couldn't put a big object: %w", err)
- }
- return nil
- })
- }
-}
-
-func populateWithBigObject(
- ctx context.Context,
- db *meta.DB,
- factory func() *objectSDK.Object,
-) error {
- t := &target{db: db}
-
- pk, _ := keys.NewPrivateKey()
- p := transformer.NewPayloadSizeLimiter(transformer.Params{
- Key: &pk.PrivateKey,
- NextTargetInit: func() transformer.ObjectWriter { return t },
- NetworkState: EpochState{},
- MaxSize: 10,
- })
-
- obj := factory()
- payload := make([]byte, 30)
-
- err := p.WriteHeader(ctx, obj)
- if err != nil {
- return err
- }
-
- _, err = p.Write(ctx, payload)
- if err != nil {
- return err
- }
-
- _, err = p.Close(ctx)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-type target struct {
- db *meta.DB
-}
-
-func (t *target) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
- prm := meta.PutPrm{}
- prm.SetObject(obj)
-
- _, err := t.db.Put(ctx, prm)
- return err
-}
-
-func PopulateGraveyard(
- ctx context.Context,
- db *meta.DB,
- group *errgroup.Group,
- workBufferSize int,
- count uint,
- factory func() *objectSDK.Object,
-) {
- ts := factory()
- ts.SetType(objectSDK.TypeTombstone)
-
- prm := meta.PutPrm{}
- prm.SetObject(ts)
-
- group.Go(func() error {
- if _, err := db.Put(ctx, prm); err != nil {
- return fmt.Errorf("couldn't put a tombstone object: %w", err)
- }
- return nil
- })
-
- cID, _ := ts.ContainerID()
- oID, _ := ts.ID()
-
- var tsAddr oid.Address
-
- tsAddr.SetContainer(cID)
- tsAddr.SetObject(oID)
-
- addrs := make(chan oid.Address, workBufferSize)
-
- go func() {
- defer close(addrs)
-
- wg := &sync.WaitGroup{}
- wg.Add(int(count))
-
- for range count {
- obj := factory()
-
- prm := meta.PutPrm{}
- prm.SetObject(obj)
-
- group.Go(func() error {
- defer wg.Done()
-
- if _, err := db.Put(ctx, prm); err != nil {
- return fmt.Errorf("couldn't put an object: %w", err)
- }
-
- cID, _ := obj.ContainerID()
- oID, _ := obj.ID()
-
- var addr oid.Address
- addr.SetContainer(cID)
- addr.SetObject(oID)
-
- addrs <- addr
- return nil
- })
- }
- wg.Wait()
- }()
-
- go func() {
- for addr := range addrs {
- prm := meta.InhumePrm{}
- prm.SetAddresses(addr)
- prm.SetTombstoneAddress(tsAddr)
-
- group.Go(func() error {
- if _, err := db.Inhume(ctx, prm); err != nil {
- return fmt.Errorf("couldn't inhume an object: %w", err)
- }
- return nil
- })
- }
- }()
-}
-
-func PopulateLocked(
- ctx context.Context,
- db *meta.DB,
- group *errgroup.Group,
- workBufferSize int,
- count uint,
- factory func() *objectSDK.Object,
-) {
- locker := factory()
- locker.SetType(objectSDK.TypeLock)
-
- prm := meta.PutPrm{}
- prm.SetObject(locker)
-
- group.Go(func() error {
- if _, err := db.Put(ctx, prm); err != nil {
- return fmt.Errorf("couldn't put a locker object: %w", err)
- }
- return nil
- })
-
- ids := make(chan oid.ID, workBufferSize)
-
- go func() {
- defer close(ids)
-
- wg := &sync.WaitGroup{}
- wg.Add(int(count))
-
- for range count {
- defer wg.Done()
-
- obj := factory()
-
- prm := meta.PutPrm{}
- prm.SetObject(obj)
-
- group.Go(func() error {
- if _, err := db.Put(ctx, prm); err != nil {
- return fmt.Errorf("couldn't put an object: %w", err)
- }
-
- id, _ := obj.ID()
- ids <- id
- return nil
- })
- }
- wg.Wait()
- }()
-
- go func() {
- for id := range ids {
- lockerCID, _ := locker.ContainerID()
- lockerOID, _ := locker.ID()
-
- group.Go(func() error {
- if err := db.Lock(ctx, lockerCID, lockerOID, []oid.ID{id}); err != nil {
- return fmt.Errorf("couldn't lock an object: %w", err)
- }
- return nil
- })
- }
- }()
-}
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
deleted file mode 100644
index 8c4ea41ad..000000000
--- a/scripts/populate-metabase/main.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package main
-
-import (
- "context"
- "errors"
- "flag"
- "fmt"
- "os"
-
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/scripts/populate-metabase/internal"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "golang.org/x/sync/errgroup"
-)
-
-var (
- path string
- force bool
- jobs uint
-
- numContainers,
- numObjects,
- numAttributesPerObj,
- numOwners,
- numPayloads,
- numAttributes uint
-)
-
-func main() {
- flag.StringVar(&path, "path", "", "Path to metabase")
- flag.BoolVar(&force, "force", false, "Rewrite existing database")
- flag.UintVar(&jobs, "j", 10000, "Number of jobs to run")
-
- flag.UintVar(&numContainers, "containers", 0, "Number of containers to be created")
- flag.UintVar(&numObjects, "objects", 0, "Number of objects per container")
- flag.UintVar(&numAttributesPerObj, "attributes", 0, "Number of attributes per object")
-
- flag.UintVar(&numOwners, "distinct-owners", 10, "Number of distinct owners to be used")
- flag.UintVar(&numPayloads, "distinct-payloads", 10, "Number of distinct payloads to be used")
- flag.UintVar(&numAttributes, "distinct-attributes", 10, "Number of distinct attributes to be used")
-
- flag.Parse()
-
- exitIf(numPayloads == 0, "must have payloads\n")
- exitIf(numAttributes == 0, "must have attributes\n")
- exitIf(numOwners == 0, "must have owners\n")
- exitIf(len(path) == 0, "path to metabase not specified\n")
- exitIf(
- numAttributesPerObj > numAttributes,
- "object can't have more attributes than available\n",
- )
-
- info, err := os.Stat(path)
- exitIf(
- err != nil && !errors.Is(err, os.ErrNotExist),
- "couldn't get path info: %s\n", err,
- )
-
- // Path exits.
- if err == nil {
- exitIf(info.IsDir(), "path is a directory\n")
- exitIf(!force, "couldn't rewrite existing file, use '-force' flag\n")
-
- err = os.Remove(path)
- exitIf(err != nil, "couldn't remove existing file: %s\n", err)
- }
-
- err = populate()
- exitIf(err != nil, "couldn't populate the metabase: %s\n", err)
-}
-
-func getObjectFactory(opts ...internal.ObjectOption) func() *objectSDK.Object {
- return func() *objectSDK.Object {
- return internal.GenerateObject(opts...)
- }
-}
-
-func populate() (err error) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- db := meta.New([]meta.Option{
- meta.WithPath(path),
- meta.WithPermissions(0o600),
- meta.WithEpochState(internal.EpochState{}),
- }...)
-
- if err = db.Open(ctx, mode.ReadWrite); err != nil {
- return fmt.Errorf("couldn't open the metabase: %w", err)
- }
- defer func() {
- if errOnClose := db.Close(ctx); errOnClose != nil {
- err = errors.Join(
- err,
- fmt.Errorf("couldn't close the metabase: %w", db.Close(ctx)),
- )
- }
- }()
-
- if err = db.Init(ctx); err != nil {
- return fmt.Errorf("couldn't init the metabase: %w", err)
- }
-
- payloads := internal.GeneratePayloadPool(numPayloads, 32)
- attributes := internal.GenerateAttributePool(numAttributes)
- owners := internal.GenerateOwnerPool(numOwners)
-
- types := []objectSDK.Type{
- objectSDK.TypeRegular,
- objectSDK.TypeLock,
- objectSDK.TypeTombstone,
- }
-
- eg, ctx := errgroup.WithContext(ctx)
- eg.SetLimit(int(jobs))
-
- for range numContainers {
- cid := cidtest.ID()
-
- for _, typ := range types {
- internal.PopulateWithObjects(ctx, db, eg, numObjects, getObjectFactory(
- internal.WithContainerID(cid),
- internal.WithType(typ),
- internal.WithPayloadFromPool(payloads),
- internal.WithOwnerIDFromPool(owners),
- internal.WithAttributesFromPool(attributes, numAttributesPerObj),
- ))
- }
- internal.PopulateWithBigObjects(ctx, db, eg, numObjects, getObjectFactory(
- internal.WithContainerID(cid),
- internal.WithType(objectSDK.TypeRegular),
- internal.WithAttributesFromPool(attributes, numAttributesPerObj),
- internal.WithOwnerIDFromPool(owners),
- ))
- internal.PopulateGraveyard(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
- internal.WithContainerID(cid),
- internal.WithType(objectSDK.TypeRegular),
- internal.WithAttributesFromPool(attributes, numAttributesPerObj),
- internal.WithOwnerIDFromPool(owners),
- ))
- internal.PopulateLocked(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
- internal.WithContainerID(cid),
- internal.WithType(objectSDK.TypeRegular),
- internal.WithAttributesFromPool(attributes, numAttributesPerObj),
- internal.WithOwnerIDFromPool(owners),
- ))
- }
-
- return eg.Wait()
-}
-
-func exitIf(cond bool, format string, args ...any) {
- if cond {
- fmt.Fprintf(os.Stderr, format, args...)
- os.Exit(1)
- }
-}